codegen-arm.cc revision 50ef84f5fad2def87d3fbc737bec4a32711fdef4
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "jsregexp.h"
#include "jump-target-light-inl.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
#include "virtual-frame-arm-inl.h"

namespace v8 {
namespace internal {


static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);
static void MultiplyByKnownInt(MacroAssembler* masm,
                               Register source,
                               Register destination,
                               int known_int);
static bool IsEasyToMultiplyBy(int x);


#define __ ACCESS_MASM(masm_)

// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

void DeferredCode::SaveRegisters() {
  // On ARM you either have a completely spilled frame or you
  // handle it yourself, but at the moment there's no automation
  // of registers and deferred code.
}


void DeferredCode::RestoreRegisters() {
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  frame_state_->frame()->AssertIsSpilled();
}


void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
}


void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterInternalFrame();
}


void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveInternalFrame();
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      previous_(owner->state()) {
  owner->set_state(this);
}


ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
                                             JumpTarget* true_target,
                                             JumpTarget* false_target)
    : CodeGenState(owner),
      true_target_(true_target),
      false_target_(false_target) {
  owner->set_state(this);
}


TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
                                           Slot* slot,
                                           TypeInfo type_info)
    : CodeGenState(owner),
      slot_(slot) {
  owner->set_state(this);
  old_type_info_ = owner->set_type_info(slot, type_info);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


TypeInfoCodeGenState::~TypeInfoCodeGenState() {
  owner()->set_type_info(slot_, old_type_info_);
}

// -------------------------------------------------------------------------
// CodeGenerator implementation

int CodeGenerator::inlined_write_barrier_size_ = -1;

CodeGenerator::CodeGenerator(MacroAssembler* masm)
    : deferred_(8),
      masm_(masm),
      info_(NULL),
      frame_(NULL),
      allocator_(NULL),
      cc_reg_(al),
      state_(NULL),
      loop_nesting_(0),
      type_info_(NULL),
      function_return_(JumpTarget::BIDIRECTIONAL),
      function_return_is_shadowed_(false) {
}


// Calling conventions:
// fp: caller's frame pointer
// sp: stack pointer
// r1: called JS function
// cp: callee's context

void CodeGenerator::Generate(CompilationInfo* info) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(info->function());
  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");

  // Initialize state.
  info_ = info;

  int slots = scope()->num_parameters() + scope()->num_stack_slots();
  ScopedVector<TypeInfo> type_info_array(slots);
  type_info_ = &type_info_array;

  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  cc_reg_ = al;

  // Adjust for function-level loop nesting.
  ASSERT_EQ(0, loop_nesting_);
  loop_nesting_ = info->loop_nesting();

  {
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments
    // lr: return address
    // fp: caller's frame pointer
    // sp: stack pointer
    // r1: called JS function
    // cp: callee's context
    allocator_->Initialize();

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ stop("stop-at");
    }
#endif

    if (info->mode() == CompilationInfo::PRIMARY) {
      frame_->Enter();
      // tos: code slot

      // Allocate space for locals and initialize them.  This also checks
      // for stack overflow.
      frame_->AllocateStackSlots();

      frame_->AssertIsSpilled();
      int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
      if (heap_slots > 0) {
        // Allocate local context.
        // Get outer context and create a new context based on it.
        __ ldr(r0, frame_->Function());
        frame_->EmitPush(r0);
        if (heap_slots <= FastNewContextStub::kMaximumSlots) {
          FastNewContextStub stub(heap_slots);
          frame_->CallStub(&stub, 1);
        } else {
          frame_->CallRuntime(Runtime::kNewContext, 1);
        }

#ifdef DEBUG
        JumpTarget verified_true;
        __ cmp(r0, cp);
        verified_true.Branch(eq);
        __ stop("NewContext: r0 is expected to be the same as cp");
        verified_true.Bind();
#endif
        // Update context local.
        __ str(cp, frame_->Context());
      }

      // TODO(1241774): Improve this code:
      // 1) only needed if we have a context
      // 2) no need to recompute context ptr every single time
      // 3) don't copy parameter operand code from SlotOperand!
      {
        Comment cmnt2(masm_, "[ copy context parameters into .context");
        // Note that iteration order is relevant here! If we have the same
        // parameter twice (e.g., function (x, y, x)), and that parameter
        // needs to be copied into the context, it must be the last argument
        // passed to the parameter that needs to be copied. This is a rare
        // case so we don't check for it; instead we rely on the copying
        // order: such a parameter is copied repeatedly into the same
        // context location and thus the last value is what is seen inside
        // the function.
        frame_->AssertIsSpilled();
        for (int i = 0; i < scope()->num_parameters(); i++) {
          Variable* par = scope()->parameter(i);
          Slot* slot = par->slot();
          if (slot != NULL && slot->type() == Slot::CONTEXT) {
            ASSERT(!scope()->is_global_scope());  // No params in global scope.
            __ ldr(r1, frame_->ParameterAt(i));
            // Loads r2 with context; used below in RecordWrite.
            __ str(r1, SlotOperand(slot, r2));
            // Load the offset into r3.
            int slot_offset =
                FixedArray::kHeaderSize + slot->index() * kPointerSize;
            __ RecordWrite(r2, Operand(slot_offset), r3, r1);
          }
        }
      }

      // Store the arguments object.  This must happen after context
      // initialization because the arguments object may be stored in
      // the context.
      if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
        StoreArgumentsObject(true);
      }

      // Initialize ThisFunction reference if present.
      if (scope()->is_function_scope() && scope()->function() != NULL) {
        frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
      }
    } else {
      // When used as the secondary compiler for splitting, r1, cp,
      // fp, and lr have been pushed on the stack.  Adjust the virtual
      // frame to match this state.
      frame_->Adjust(4);

      // Bind all the bailout labels to the beginning of the function.
      List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
      for (int i = 0; i < bailouts->length(); i++) {
        __ bind(bailouts->at(i)->label());
      }
    }

    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.SetExpectedHeight();
    function_return_is_shadowed_ = false;

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope()->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope()->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatements(info->function()->body());
    }
  }

  // Handle the return from the function.
  if (has_valid_frame()) {
    // If there is a valid frame, control flow can fall off the end of
    // the body.  In that case there is an implicit return statement.
    ASSERT(!function_return_is_shadowed_);
    frame_->PrepareForReturn();
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
    if (function_return_.is_bound()) {
      function_return_.Jump();
    } else {
      function_return_.Bind();
      GenerateReturnSequence();
    }
  } else if (function_return_.is_linked()) {
    // If the return target has dangling jumps to it, then we have not
    // yet generated the return sequence.  This can happen when (a)
    // control does not flow off the end of the body so we did not
    // compile an artificial return statement just above, and (b) there
    // are return statements in the body but (c) they are all shadowed.
    function_return_.Bind();
    GenerateReturnSequence();
  }

  // Adjust for function-level loop nesting.
  ASSERT(loop_nesting_ == info->loop_nesting());
  loop_nesting_ = 0;

  // Code generation state must be reset.
  ASSERT(!has_cc());
  ASSERT(state_ == NULL);
  ASSERT(loop_nesting() == 0);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    ProcessDeferred();
  }

  allocator_ = NULL;
  type_info_ = NULL;
}


int CodeGenerator::NumberOfSlot(Slot* slot) {
  if (slot == NULL) return kInvalidSlotNumber;
  switch (slot->type()) {
    case Slot::PARAMETER:
      return slot->index();
    case Slot::LOCAL:
      return slot->index() + scope()->num_parameters();
    default:
      break;
  }
  return kInvalidSlotNumber;
}


MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(cp));  // do not overwrite context register
      Register context = cp;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now. Get the function context.
      // (In fact this mov may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we
      // are always at a function context. However it is safe to dereference
      // because the function context of a function context is itself.
      // Before deleting this mov we should try to create a counter-example
      // first, though...)
      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      UNREACHABLE();
      return MemOperand(r0, 0);
  }
}



MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Register tmp,
    Register tmp2,
    JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = cp;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
  }
  // Check that last extension is NULL.
  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
  __ tst(tmp2, tmp2);
  slow->Branch(ne);
  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(tmp, slot->index());
}


// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
// condition code register and no value is pushed. If the condition code
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
                                  JumpTarget* true_target,
                                  JumpTarget* false_target,
                                  bool force_cc) {
  ASSERT(!has_cc());
  int original_height = frame_->height();

  { ConditionCodeGenState new_state(this, true_target, false_target);
    Visit(x);

    // If we hit a stack overflow, we may not have actually visited
    // the expression.  In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (e.g., a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        has_valid_frame() &&
        !has_cc() &&
        frame_->height() == original_height) {
      true_target->Jump();
    }
  }
  if (force_cc && frame_ != NULL && !has_cc()) {
    // Convert the TOS value to a boolean in the condition code register.
    ToBoolean(true_target, false_target);
  }
  ASSERT(!force_cc || !has_valid_frame() || has_cc());
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}



void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  JumpTarget true_target;
  JumpTarget false_target;
  LoadCondition(expr, &true_target, &false_target, false);

  if (has_cc()) {
    // Convert cc_reg_ into a boolean value.
    JumpTarget loaded;
    JumpTarget materialize_true;
    materialize_true.Branch(cc_reg_);
    frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
    loaded.Jump();
    materialize_true.Bind();
    frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
    loaded.Bind();
    cc_reg_ = al;
  }

  if (true_target.is_linked() || false_target.is_linked()) {
    // We have at least one condition value that has been "translated"
    // into a branch, thus it needs to be loaded explicitly.
    JumpTarget loaded;
    if (frame_ != NULL) {
      loaded.Jump();  // Don't lose the current TOS.
    }
    bool both = true_target.is_linked() && false_target.is_linked();
    // Load "true" if necessary.
    if (true_target.is_linked()) {
      true_target.Bind();
      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
    }
    // If both "true" and "false" need to be loaded jump across the code for
    // "false".
    if (both) {
      loaded.Jump();
    }
    // Load "false" if necessary.
    if (false_target.is_linked()) {
      false_target.Bind();
      frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
    }
    // A value is loaded on all paths reaching this point.
    loaded.Bind();
  }
  ASSERT(has_valid_frame());
  ASSERT(!has_cc());
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::LoadGlobal() {
  Register reg = frame_->GetTOSRegister();
  __ ldr(reg, GlobalObject());
  frame_->EmitPush(reg);
}


void CodeGenerator::LoadGlobalReceiver(Register scratch) {
  Register reg = frame_->GetTOSRegister();
  __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(reg,
         FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
  frame_->EmitPush(reg);
}


ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
  ASSERT(scope()->arguments_shadow() != NULL);
  // We don't want to do lazy arguments allocation for functions that
  // have heap-allocated contexts, because it interferes with the
  // uninitialized const tracking in the context objects.
  return (scope()->num_heap_slots() > 0)
      ? EAGER_ARGUMENTS_ALLOCATION
      : LAZY_ARGUMENTS_ALLOCATION;
}



void CodeGenerator::StoreArgumentsObject(bool initial) {
  ArgumentsAllocationMode mode = ArgumentsMode();
  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);

  Comment cmnt(masm_, "[ store arguments object");
  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
    // When using lazy arguments allocation, we store the hole value
    // as a sentinel indicating that the arguments object hasn't been
    // allocated yet.
    frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
  } else {
    frame_->SpillAll();
    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
    __ ldr(r2, frame_->Function());
    // The receiver is below the arguments, the return address, and the
    // frame pointer on the stack.
    const int kReceiverDisplacement = 2 + scope()->num_parameters();
    __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
    __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
    frame_->Adjust(3);
    __ Push(r2, r1, r0);
    frame_->CallStub(&stub, 3);
    frame_->EmitPush(r0);
  }

  Variable* arguments = scope()->arguments()->var();
  Variable* shadow = scope()->arguments_shadow()->var();
  ASSERT(arguments != NULL && arguments->slot() != NULL);
  ASSERT(shadow != NULL && shadow->slot() != NULL);
  JumpTarget done;
  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
    // We have to skip storing into the arguments slot if it has
    // already been written to. This can happen if a function
    // has a local variable named 'arguments'.
    LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
    Register arguments = frame_->PopToRegister();
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(arguments, ip);
    done.Branch(ne);
  }
  StoreToSlot(arguments->slot(), NOT_CONST_INIT);
  if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
  StoreToSlot(shadow->slot(), NOT_CONST_INIT);
}



void CodeGenerator::LoadTypeofExpression(Expression* expr) {
  // Special handling of identifiers as subexpressions of typeof.
  Variable* variable = expr->AsVariableProxy()->AsVariable();
  if (variable != NULL && !variable->is_this() && variable->is_global()) {
    // For a global variable we build the property reference
    // <global>.<variable> and perform a (regular non-contextual) property
    // load to make sure we do not get reference errors.
    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
    Literal key(variable->name());
    Property property(&global, &key, RelocInfo::kNoPosition);
    Reference ref(this, &property);
    ref.GetValue();
  } else if (variable != NULL && variable->slot() != NULL) {
    // For a variable that rewrites to a slot, we signal it is the immediate
    // subexpression of a typeof.
    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
  } else {
    // Anything else can be handled normally.
    Load(expr);
  }
}


Reference::Reference(CodeGenerator* cgen,
                     Expression* expression,
                     bool persist_after_get)
    : cgen_(cgen),
      expression_(expression),
      type_(ILLEGAL),
      persist_after_get_(persist_after_get) {
  cgen->LoadReference(this);
}


Reference::~Reference() {
  ASSERT(is_unloaded() || is_illegal());
}


void CodeGenerator::LoadReference(Reference* ref) {
  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    Load(property->obj());
    if (property->key()->IsPropertyName()) {
      ref->set_type(Reference::NAMED);
    } else {
      Load(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property.  Global variables are treated as named property references.
    if (var->is_global()) {
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->slot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    Load(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }
}


void CodeGenerator::UnloadReference(Reference* ref) {
  int size = ref->size();
  ref->set_unloaded();
  if (size == 0) return;

  // Pop a reference from the stack while preserving TOS.
  VirtualFrame::RegisterAllocationScope scope(this);
  Comment cmnt(masm_, "[ UnloadReference");
  if (size > 0) {
    Register tos = frame_->PopToRegister();
    frame_->Drop(size);
    frame_->EmitPush(tos);
  }
}



// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
                              JumpTarget* false_target) {
  // Note: The generated code snippet does not change stack variables.
  //       Only the condition code should be set.
  bool known_smi = frame_->KnownSmiAt(0);
  Register tos = frame_->PopToRegister();

  // Fast case checks

  // Check if the value is 'false'.
  if (!known_smi) {
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(tos, ip);
    false_target->Branch(eq);

    // Check if the value is 'true'.
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(tos, ip);
    true_target->Branch(eq);

    // Check if the value is 'undefined'.
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(tos, ip);
    false_target->Branch(eq);
  }

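  // The check below relies on the smi encoding: kSmiTag == 0 and
  // kSmiTagSize == 1, so the smi 0 is the machine word 0.  The cmp sets
  // eq exactly when the value is the integer zero (falsy); any other smi
  // (tag bit clear, non-zero word) is truthy.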
  // Check if the value is a smi.
  __ cmp(tos, Operand(Smi::FromInt(0)));

  if (!known_smi) {
    false_target->Branch(eq);
    __ tst(tos, Operand(kSmiTagMask));
    true_target->Branch(eq);

    // Slow case: call the runtime.
    frame_->EmitPush(tos);
    frame_->CallRuntime(Runtime::kToBool, 1);
    // Convert the result (r0) to a condition code.
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(r0, ip);
  }

  cc_reg_ = ne;
}


void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           OverwriteMode overwrite_mode,
                                           GenerateInlineSmi inline_smi,
                                           int constant_rhs) {
  // top of virtual frame: y
  // 2nd element on virtual frame: x
  // result: top of virtual frame

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:
    case Token::SUB:
      if (inline_smi) {
        JumpTarget done;
        Register rhs = frame_->PopToRegister();
        Register lhs = frame_->PopToRegister(rhs);
        Register scratch = VirtualFrame::scratch0();
        __ orr(scratch, rhs, Operand(lhs));
        // Check they are both small and positive.
819        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
820        STATIC_ASSERT(kSmiTag == 0);
821        if (op == Token::ADD) {
822          __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
823        } else {
824          __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
825        }
826        done.Branch(eq);
827        GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
828        frame_->SpillAll();
829        frame_->CallStub(&stub, 0);
830        done.Bind();
831        frame_->EmitPush(r0);
832        break;
833      } else {
834        // Fall through!
835      }
836    case Token::BIT_OR:
837    case Token::BIT_AND:
838    case Token::BIT_XOR:
839      if (inline_smi) {
840        bool rhs_is_smi = frame_->KnownSmiAt(0);
841        bool lhs_is_smi = frame_->KnownSmiAt(1);
842        Register rhs = frame_->PopToRegister();
843        Register lhs = frame_->PopToRegister(rhs);
844        Register smi_test_reg;
845        Condition cond;
846        if (!rhs_is_smi || !lhs_is_smi) {
847          if (rhs_is_smi) {
848            smi_test_reg = lhs;
849          } else if (lhs_is_smi) {
850            smi_test_reg = rhs;
851          } else {
852            smi_test_reg = VirtualFrame::scratch0();
853            __ orr(smi_test_reg, rhs, Operand(lhs));
854          }
855          // Check they are both Smis.
856          __ tst(smi_test_reg, Operand(kSmiTagMask));
857          cond = eq;
858        } else {
859          cond = al;
860        }
861        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
862        if (op == Token::BIT_OR) {
863          __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
864        } else if (op == Token::BIT_AND) {
865          __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
866        } else {
867          ASSERT(op == Token::BIT_XOR);
868          STATIC_ASSERT(kSmiTag == 0);
869          __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
870        }
871        if (cond != al) {
872          JumpTarget done;
873          done.Branch(cond);
874          GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
875          frame_->SpillAll();
876          frame_->CallStub(&stub, 0);
877          done.Bind();
878        }
879        frame_->EmitPush(r0);
880        break;
881      } else {
882        // Fall through!
883      }
884    case Token::MUL:
885    case Token::DIV:
886    case Token::MOD:
887    case Token::SHL:
888    case Token::SHR:
889    case Token::SAR: {
890      Register rhs = frame_->PopToRegister();
891      Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
892      GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
893      frame_->SpillAll();
894      frame_->CallStub(&stub, 0);
895      frame_->EmitPush(r0);
896      break;
897    }
898
899    case Token::COMMA: {
900      Register scratch = frame_->PopToRegister();
901      // Simply discard left value.
902      frame_->Drop();
903      frame_->EmitPush(scratch);
904      break;
905    }
906
907    default:
908      // Other cases should have been handled before this point.
909      UNREACHABLE();
910      break;
911  }
912}


class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             int value,
                             bool reversed,
                             OverwriteMode overwrite_mode,
                             Register tos)
      : op_(op),
        value_(value),
        reversed_(reversed),
        overwrite_mode_(overwrite_mode),
        tos_register_(tos) {
    set_comment("[ DeferredInlineSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  int value_;
  bool reversed_;
  OverwriteMode overwrite_mode_;
  Register tos_register_;
};


// On entry the non-constant side of the binary operation is in tos_register_
// and the constant smi side is nowhere.  The tos_register_ is not used by the
// virtual frame.  On exit the answer is in the tos_register_ and the virtual
// frame is unchanged.
void DeferredInlineSmiOperation::Generate() {
  VirtualFrame copied_frame(*frame_state()->frame());
  copied_frame.SpillAll();

  Register lhs = r1;
  Register rhs = r0;
  switch (op_) {
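    // For ADD and SUB the inline fast path in SmiOperation has already
    // applied the constant optimistically before branching here (on
    // overflow or a non-smi operand), so those cases first undo the
    // operation to recover the original operand, then set up r0/r1 for
    // the generic stub.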
    case Token::ADD: {
      // Revert optimistic add.
      if (reversed_) {
        __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SUB: {
      // Revert optimistic sub.
      if (reversed_) {
        __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    // For these operations there is no optimistic operation that needs to be
    // reverted.
    case Token::MUL:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (tos_register_.is(r1)) {
        __ mov(r0, Operand(Smi::FromInt(value_)));
      } else {
        ASSERT(tos_register_.is(r0));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      }
      if (reversed_ == tos_register_.is(r1)) {
          lhs = r0;
          rhs = r1;
      }
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }

  GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
  __ CallStub(&stub);

  // The generic stub returns its value in r0, but that's not
  // necessarily what we want.  We want whatever the inlined code
  // expected, which is that the answer is in the same register as
  // the operand was.
  __ Move(tos_register_, r0);

  // The tos register was not in use for the virtual frame that we
  // came into this function with, so we can merge back to that frame
  // without trashing it.
  copied_frame.MergeTo(frame_state()->frame());
}

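// Returns true if x has at most two bits set.  'x &= x - 1' clears the
// lowest set bit (e.g. 0x14 -> 0x10 -> 0), so after clearing one bit the
// remainder is zero or a power of two exactly when the original value
// had population count <= 2.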
static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}


// Returns the index of the lowest bit set.
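// E.g. BitPosition(0x14) is 2.  The loop does not terminate for x == 0,
// so callers must pass a non-zero value.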
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}


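// Smis are tagged integers: kSmiTag == 0 with a one-bit tag
// (kSmiTagSize == 1), so the smi 5 is the machine word 10 and
// 'tst reg, #kSmiTagMask' sets eq exactly for smis.  The inline fast
// paths below exploit the fact that adding or subtracting two tagged
// smis is just adding or subtracting the tagged machine words.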
void CodeGenerator::SmiOperation(Token::Value op,
                                 Handle<Object> value,
                                 bool reversed,
                                 OverwriteMode mode) {
  int int_value = Smi::cast(*value)->value();

  bool both_sides_are_smi = frame_->KnownSmiAt(0);

  bool something_to_inline;
  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::BIT_AND:
    case Token::BIT_OR:
    case Token::BIT_XOR: {
      something_to_inline = true;
      break;
    }
    case Token::SHL: {
      something_to_inline = (both_sides_are_smi || !reversed);
      break;
    }
    case Token::SHR:
    case Token::SAR: {
      if (reversed) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MOD: {
      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MUL: {
      if (!IsEasyToMultiplyBy(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    default: {
      something_to_inline = false;
      break;
    }
  }

  if (!something_to_inline) {
    if (!reversed) {
      // Push the rhs onto the virtual frame by putting it in a TOS register.
      Register rhs = frame_->GetTOSRegister();
      __ mov(rhs, Operand(value));
      frame_->EmitPush(rhs, TypeInfo::Smi());
      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
    } else {
      // Pop the rhs, then push lhs and rhs in the right order.  Only performs
      // at most one pop; the rest takes place in TOS registers.
      Register lhs = frame_->GetTOSRegister();    // Get reg for pushing.
      Register rhs = frame_->PopToRegister(lhs);  // Don't use lhs for this.
      __ mov(lhs, Operand(value));
      frame_->EmitPush(lhs, TypeInfo::Smi());
      TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
      frame_->EmitPush(rhs, t);
      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, kUnknownIntValue);
    }
    return;
  }

  // We move the top of stack to a register (normally no move is involved).
  Register tos = frame_->PopToRegister();
  switch (op) {
    case Token::ADD: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      __ add(tos, tos, Operand(value), SetCC);
      deferred->Branch(vs);
      if (!both_sides_are_smi) {
        __ tst(tos, Operand(kSmiTagMask));
        deferred->Branch(ne);
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      if (reversed) {
        __ rsb(tos, tos, Operand(value), SetCC);
      } else {
        __ sub(tos, tos, Operand(value), SetCC);
      }
      deferred->Branch(vs);
      if (!both_sides_are_smi) {
        __ tst(tos, Operand(kSmiTagMask));
        deferred->Branch(ne);
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }


    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      if (both_sides_are_smi) {
        switch (op) {
          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
          default: UNREACHABLE();
        }
        frame_->EmitPush(tos, TypeInfo::Smi());
      } else {
        DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
        __ tst(tos, Operand(kSmiTagMask));
        deferred->Branch(ne);
        switch (op) {
          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
          default: UNREACHABLE();
        }
        deferred->BindExit();
        TypeInfo result_type =
            (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
        frame_->EmitPush(tos, result_type);
      }
      break;
    }

    case Token::SHL:
      if (reversed) {
        ASSERT(both_sides_are_smi);
        int max_shift = 0;
        int max_result = int_value == 0 ? 1 : int_value;
        while (Smi::IsValid(max_result << 1)) {
          max_shift++;
          max_result <<= 1;
        }
        DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
        // Mask off the last 5 bits of the shift operand (rhs).  This is part
        // of the definition of shift in JS and we know we have a Smi so we
        // can safely do this.  The masked version gets passed to the
        // deferred code, but that makes no difference.
        __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
        __ cmp(tos, Operand(Smi::FromInt(max_shift)));
        deferred->Branch(ge);
        Register scratch = VirtualFrame::scratch0();
        __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Untag.
        __ mov(tos, Operand(Smi::FromInt(int_value)));    // Load constant.
        __ mov(tos, Operand(tos, LSL, scratch));          // Shift constant.
        deferred->BindExit();
        TypeInfo result = TypeInfo::Integer32();
        frame_->EmitPush(tos, result);
        break;
      }
      // Fall through!
    case Token::SHR:
    case Token::SAR: {
      ASSERT(!reversed);
      TypeInfo result = TypeInfo::Integer32();
      Register scratch = VirtualFrame::scratch0();
      Register scratch2 = VirtualFrame::scratch1();
      int shift_value = int_value & 0x1f;  // least significant 5 bits
      DeferredCode* deferred =
        new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
      uint32_t problematic_mask = kSmiTagMask;
      // For unsigned shift by zero all negative smis are problematic.
      bool skip_smi_test = both_sides_are_smi;
      if (shift_value == 0 && op == Token::SHR) {
        problematic_mask |= 0x80000000;
        skip_smi_test = false;
      }
      if (!skip_smi_test) {
        __ tst(tos, Operand(problematic_mask));
        deferred->Branch(ne);  // Go slow for problematic input.
      }
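      // The overflow checks below rely on the fact that adding 0x40000000
      // to a 32-bit word sets the sign bit (mi) exactly when the word lies
      // outside [-2^30, 2^30), i.e. when the result can no longer be
      // represented as a tagged smi.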
      switch (op) {
        case Token::SHL: {
          if (shift_value != 0) {
            int adjusted_shift = shift_value - kSmiTagSize;
            ASSERT(adjusted_shift >= 0);
            if (adjusted_shift != 0) {
              __ mov(scratch, Operand(tos, LSL, adjusted_shift));
              // Check that the *signed* result fits in a smi.
              __ add(scratch2, scratch, Operand(0x40000000), SetCC);
              deferred->Branch(mi);
              __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
            } else {
              // Check that the *signed* result fits in a smi.
              __ add(scratch2, tos, Operand(0x40000000), SetCC);
              deferred->Branch(mi);
              __ mov(tos, Operand(tos, LSL, kSmiTagSize));
            }
          }
          break;
        }
        case Token::SHR: {
          if (shift_value != 0) {
            __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Remove tag.
            // LSR by immediate 0 means shifting 32 bits.
            __ mov(scratch, Operand(scratch, LSR, shift_value));
            if (shift_value == 1) {
              // Check that the *unsigned* result fits in a smi.
              // Neither of the two high-order bits can be set:
              // - 0x80000000: high bit would be lost when smi tagging;
              // - 0x40000000: this number would convert to negative when
              //   smi tagging.
              // These two cases can only happen with shifts by 0 or 1 when
              // handed a valid smi.
              __ tst(scratch, Operand(0xc0000000));
              deferred->Branch(ne);
            } else {
              ASSERT(shift_value >= 2);
              result = TypeInfo::Smi();  // SHR by at least 2 gives a Smi.
            }
            __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
          }
          break;
        }
        case Token::SAR: {
          // In the ARM instruction set, ASR by immediate 0 means shifting 32
          // bits.
          if (shift_value != 0) {
            // Do the shift and the tag removal in one operation.  If the shift
            // is 31 bits (the highest possible value) then we emit the
            // instruction as a shift by 0 which means shift arithmetically by
            // 32.
            __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
            // Put tag back.
            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
            // SAR by at least 1 gives a Smi.
            result = TypeInfo::Smi();
          }
          break;
        }
        default: UNREACHABLE();
      }
      deferred->BindExit();
      frame_->EmitPush(tos, result);
      break;
    }

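    // Modulus by a positive power of two is inlined as a bit mask.  For a
    // non-negative smi, the tagged value of x % 2^k is the low k payload
    // bits of the tagged word (the tag bit is zero), hence the mask
    // (int_value << kSmiTagSize) - 1; e.g. for int_value == 4 the mask is
    // 0x7.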
    case Token::MOD: {
      ASSERT(!reversed);
      ASSERT(int_value >= 2);
      ASSERT(IsPowerOf2(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned mask = (0x80000000u | kSmiTagMask);
      __ tst(tos, Operand(mask));
      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
      mask = (int_value << kSmiTagSize) - 1;
      __ and_(tos, tos, Operand(mask));
      deferred->BindExit();
      // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
      frame_->EmitPush(
          tos,
          both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
      break;
    }

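    // The mask built below covers the sign bit down to (and including) the
    // highest bit of the largest tagged smi that can be multiplied by
    // int_value without overflowing, plus the tag bit; a clear tst result
    // therefore guarantees a non-negative smi small enough for the inline
    // multiply.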
    case Token::MUL: {
      ASSERT(IsEasyToMultiplyBy(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
      max_smi_that_wont_overflow <<= kSmiTagSize;
      unsigned mask = 0x80000000u;
      while ((mask & max_smi_that_wont_overflow) == 0) {
        mask |= mask >> 1;
      }
      mask |= kSmiTagMask;
      // This does a single mask that checks for a too high value in a
      // conservative way and for a non-Smi.  It also filters out negative
      // numbers, unfortunately, but since this code is inline we prefer
      // brevity to comprehensiveness.
      __ tst(tos, Operand(mask));
      deferred->Branch(ne);
      MultiplyByKnownInt(masm_, tos, tos, int_value);
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    default:
      UNREACHABLE();
      break;
  }
}


void CodeGenerator::Comparison(Condition cc,
                               Expression* left,
                               Expression* right,
                               bool strict) {
  VirtualFrame::RegisterAllocationScope scope(this);

  if (left != NULL) Load(left);
  if (right != NULL) Load(right);

  // sp[0] : y
  // sp[1] : x
  // result : cc register

  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == eq);

  Register lhs;
  Register rhs;

  bool lhs_is_smi;
  bool rhs_is_smi;

  // We load the top two stack positions into registers chosen by the virtual
  // frame.  This should keep the register shuffling to a minimum.
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
  if (cc == gt || cc == le) {
    cc = ReverseCondition(cc);
    lhs_is_smi = frame_->KnownSmiAt(0);
    rhs_is_smi = frame_->KnownSmiAt(1);
    lhs = frame_->PopToRegister();
    rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
  } else {
    rhs_is_smi = frame_->KnownSmiAt(0);
    lhs_is_smi = frame_->KnownSmiAt(1);
    rhs = frame_->PopToRegister();
    lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
  }

  bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);

  ASSERT(rhs.is(r0) || rhs.is(r1));
  ASSERT(lhs.is(r0) || lhs.is(r1));

  JumpTarget exit;

  if (!both_sides_are_smi) {
    // Now we have the two sides in r0 and r1.  We flush any other registers
    // because the stub doesn't know about register allocation.
    frame_->SpillAll();
    Register scratch = VirtualFrame::scratch0();
    Register smi_test_reg;
    if (lhs_is_smi) {
      smi_test_reg = rhs;
    } else if (rhs_is_smi) {
      smi_test_reg = lhs;
    } else {
      __ orr(scratch, lhs, Operand(rhs));
      smi_test_reg = scratch;
    }
    __ tst(smi_test_reg, Operand(kSmiTagMask));
    JumpTarget smi;
    smi.Branch(eq);

    // Perform non-smi comparison by stub.
    // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
    // We call with 0 args because there are 0 on the stack.
    CompareStub stub(cc, strict, kBothCouldBeNaN, true, lhs, rhs);
    frame_->CallStub(&stub, 0);
    __ cmp(r0, Operand(0));
    exit.Jump();

    smi.Bind();
  }

  // Do smi comparisons by pointer comparison.
  __ cmp(lhs, Operand(rhs));

  exit.Bind();
  cc_reg_ = cc;
}
1427
1428
1429// Call the function on the stack with the given arguments.
1430void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
1431                                      CallFunctionFlags flags,
1432                                      int position) {
1433  // Push the arguments ("left-to-right") on the stack.
1434  int arg_count = args->length();
1435  for (int i = 0; i < arg_count; i++) {
1436    Load(args->at(i));
1437  }
1438
1439  // Record the position for debugging purposes.
1440  CodeForSourcePosition(position);
1441
1442  // Use the shared code stub to call the function.
1443  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
1444  CallFunctionStub call_function(arg_count, in_loop, flags);
1445  frame_->CallStub(&call_function, arg_count + 1);
1446
1447  // Restore context and pop function from the stack.
1448  __ ldr(cp, frame_->Context());
1449  frame_->Drop();  // discard the TOS
1450}
1451
1452
1453void CodeGenerator::CallApplyLazy(Expression* applicand,
1454                                  Expression* receiver,
1455                                  VariableProxy* arguments,
1456                                  int position) {
1457  // An optimized implementation of expressions of the form
1458  // x.apply(y, arguments).
1459  // If the arguments object of the scope has not been allocated,
1460  // and x.apply is Function.prototype.apply, this optimization
1461  // just copies y and the arguments of the current function on the
1462  // stack, as receiver and arguments, and calls x.
1463  // In the implementation comments, we call x the applicand
1464  // and y the receiver.

  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
  ASSERT(arguments->IsArguments());

  // Load applicand.apply onto the stack. This will usually
  // give us a megamorphic load site. Not super, but it works.
  Load(applicand);
  Handle<String> name = Factory::LookupAsciiSymbol("apply");
  frame_->Dup();
  frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
  frame_->EmitPush(r0);

  // Load the receiver and the existing arguments object onto the
  // expression stack. Avoid allocating the arguments object here.
  Load(receiver);
  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);

  // At this point the top two stack elements are probably in registers
  // since they were just loaded.  Ensure they are in registers and get
  // hold of those registers.
  Register receiver_reg = frame_->Peek2();
  Register arguments_reg = frame_->Peek();

  // From now on the frame is spilled.
  frame_->SpillAll();

  // Emit the source position information after having loaded the
  // receiver and the arguments.
  CodeForSourcePosition(position);
  // Contents of the stack at this point:
  //   sp[0]: arguments object of the current function or the hole.
  //   sp[1]: receiver
  //   sp[2]: applicand.apply
  //   sp[3]: applicand.

  // Check if the arguments object has been lazily allocated
  // already. If so, just use that instead of copying the arguments
  // from the stack. This also deals with cases where a local variable
  // named 'arguments' has been introduced.
  JumpTarget slow;
  Label done;
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(ip, arguments_reg);
  slow.Branch(ne);

  Label build_args;
  // Get rid of the arguments object probe.
  frame_->Drop();
  // Stack now has 3 elements on it.
  // Contents of stack at this point:
  //   sp[0]: receiver - in the receiver_reg register.
  //   sp[1]: applicand.apply
  //   sp[2]: applicand.

  // Check that the receiver really is a JavaScript object.
  __ BranchOnSmi(receiver_reg, &build_args);
  // We allow all JSObjects including JSFunctions.  As long as
  // JS_FUNCTION_TYPE is the last instance type and it is right
  // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
  // bound.
  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
  __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &build_args);

  // Check that applicand.apply is Function.prototype.apply.
  __ ldr(r0, MemOperand(sp, kPointerSize));
  __ BranchOnSmi(r0, &build_args);
  __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
  __ b(ne, &build_args);
  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
  Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
  __ ldr(r1, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
  __ cmp(r1, Operand(apply_code));
  __ b(ne, &build_args);

  // Check that applicand is a function.
  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
  __ BranchOnSmi(r1, &build_args);
  __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
  __ b(ne, &build_args);

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  Label invoke, adapted;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adapted);

  // No arguments adaptor frame. Copy fixed number of arguments.
  __ mov(r0, Operand(scope()->num_parameters()));
  for (int i = 0; i < scope()->num_parameters(); i++) {
    __ ldr(r2, frame_->ParameterAt(i));
    __ push(r2);
  }
  __ jmp(&invoke);

  // Arguments adaptor frame present. Copy arguments from there, but
  // avoid copying too many arguments to avoid stack overflows.
  __ bind(&adapted);
  static const uint32_t kArgumentsLimit = 1 * KB;
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ mov(r0, Operand(r0, LSR, kSmiTagSize));
  __ mov(r3, r0);
  __ cmp(r0, Operand(kArgumentsLimit));
  __ b(gt, &build_args);
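  // Illustrative note (invented example): with kArgumentsLimit at
  // 1 * KB, a function that was itself invoked with more than 1024
  // (mismatched) arguments takes the branch above and falls back to
  // the generic code at build_args instead of copying them all.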

  // Loop through the arguments pushing them onto the execution
  // stack. We don't inform the virtual frame of the push, so we don't
  // have to worry about getting rid of the elements from the virtual
  // frame.
  Label loop;
  // r3 is a small non-negative integer, due to the test above.
  __ cmp(r3, Operand(0));
  __ b(eq, &invoke);
  // Compute the address of the first argument.
  __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
  __ add(r2, r2, Operand(kPointerSize));
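  // Note (added for illustration): r2 now points at the first argument,
  // which lives highest in memory; the post-decrementing loads below
  // therefore push the arguments in the same first-to-last order as the
  // fixed-argument copy above.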
  __ bind(&loop);
  // Post-decrement argument address by kPointerSize on each iteration.
  __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
  __ push(r4);
  __ sub(r3, r3, Operand(1), SetCC);
  __ b(gt, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ParameterCount actual(r0);
  __ InvokeFunction(r1, actual, CALL_FUNCTION);
  // Drop applicand.apply and applicand from the stack, and push
  // the result of the function call, but leave the spilled frame
  // unchanged, with 3 elements, so it is correct when we compile the
  // slow-case code.
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ push(r0);
  // Stack now has 1 element:
  //   sp[0]: result
  __ jmp(&done);

  // Slow-case: Allocate the arguments object since we know it isn't
  // there, and fall-through to the slow-case where we call
  // applicand.apply.
  __ bind(&build_args);
  // Stack now has 3 elements, because we have jumped here from a point
  // where the stack looked like this:
  //   sp[0]: receiver
  //   sp[1]: applicand.apply
  //   sp[2]: applicand.
  StoreArgumentsObject(false);

  // Stack and frame now have 4 elements.
  slow.Bind();

  // Generic computation of x.apply(y, args) with no special optimization.
  // Flip applicand.apply and applicand on the stack, so
  // applicand looks like the receiver of the applicand.apply call.
  // Then process it as a normal function call.
  __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
  __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));

  CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
  frame_->CallStub(&call_function, 3);
  // The function and its two arguments have been dropped.
  frame_->Drop();  // Drop the receiver as well.
  frame_->EmitPush(r0);
  // Stack now has 1 element:
  //   sp[0]: result
  __ bind(&done);

  // Restore the context register after a call.
  __ ldr(cp, frame_->Context());
}


void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
  ASSERT(has_cc());
  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
  target->Branch(cc);
  cc_reg_ = al;
}


void CodeGenerator::CheckStack() {
  frame_->SpillAll();
  Comment cmnt(masm_, "[ check stack");
  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
  // Set up lr before the comparison and the conditional call below.
  // kInstrSize is added to the implicit 8 byte offset that always
  // applies to operations with pc and gives a return address 12 bytes
  // down, i.e. just past the conditional mov to pc.
  masm_->add(lr, pc, Operand(Assembler::kInstrSize));
  masm_->cmp(sp, Operand(ip));
  StackCheckStub stub;
  // Call the stub if lower.
  masm_->mov(pc,
             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
                     RelocInfo::CODE_TARGET),
             LeaveCC,
             lo);
}


void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
    Visit(statements->at(i));
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitBlock(Block* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Block");
  CodeForStatementPosition(node);
  node->break_target()->SetExpectedHeight();
  VisitStatements(node->statements());
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  frame_->EmitPush(cp);
  frame_->EmitPush(Operand(pairs));
  frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));

  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
  // The result is discarded.
}


void CodeGenerator::VisitDeclaration(Declaration* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Declaration");
  Variable* var = node->proxy()->var();
  ASSERT(var != NULL);  // must have been resolved
  Slot* slot = var->slot();

  // If it was not possible to allocate the variable at compile time,
  // we need to "declare" it at runtime to make sure it actually
  // exists in the local context.
  if (slot != NULL && slot->type() == Slot::LOOKUP) {
    // Variables with a "LOOKUP" slot were introduced as non-locals
    // during variable resolution and must have mode DYNAMIC.
    ASSERT(var->is_dynamic());
    // For now, just do a runtime call.
    frame_->EmitPush(cp);
    frame_->EmitPush(Operand(var->name()));
    // Declaration nodes are always declared in only two modes.
    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    frame_->EmitPush(Operand(Smi::FromInt(attr)));
    // Push initial value, if any.
    // Note: For variables we must not push an initial value (such as
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
    } else if (node->fun() != NULL) {
      Load(node->fun());
    } else {
      frame_->EmitPush(Operand(0));
    }

    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
    // Ignore the return value (declarations are statements).

    ASSERT(frame_->height() == original_height);
    return;
  }

  ASSERT(!var->is_global());

  // If we have a function or a constant, we need to initialize the variable.
  Expression* val = NULL;
  if (node->mode() == Variable::CONST) {
    val = new Literal(Factory::the_hole_value());
  } else {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    WriteBarrierCharacter wb_info =
        val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
    if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
    // Set initial value.
    Reference target(this, node->proxy());
    Load(val);
    target.SetValue(NOT_CONST_INIT, wb_info);

    // Get rid of the assigned value (declarations are statements).
    frame_->Drop();
  }
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ ExpressionStatement");
  CodeForStatementPosition(node);
  Expression* expression = node->expression();
  expression->MarkAsStatement();
  Load(expression);
  frame_->Drop();
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "// EmptyStatement");
  CodeForStatementPosition(node);
  // nothing to do
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitIfStatement(IfStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ IfStatement");
  // Generate different code depending on which parts of the if statement
  // are present or not.
  bool has_then_stm = node->HasThenStatement();
  bool has_else_stm = node->HasElseStatement();

  CodeForStatementPosition(node);

  JumpTarget exit;
  if (has_then_stm && has_else_stm) {
    Comment cmnt(masm_, "[ IfThenElse");
    JumpTarget then;
    JumpTarget else_;
    // if (cond)
    LoadCondition(node->condition(), &then, &else_, true);
    if (frame_ != NULL) {
      Branch(false, &else_);
    }
    // then
    if (frame_ != NULL || then.is_linked()) {
      then.Bind();
      Visit(node->then_statement());
    }
    if (frame_ != NULL) {
      exit.Jump();
    }
    // else
    if (else_.is_linked()) {
      else_.Bind();
      Visit(node->else_statement());
    }

  } else if (has_then_stm) {
    Comment cmnt(masm_, "[ IfThen");
    ASSERT(!has_else_stm);
    JumpTarget then;
    // if (cond)
    LoadCondition(node->condition(), &then, &exit, true);
    if (frame_ != NULL) {
      Branch(false, &exit);
    }
    // then
    if (frame_ != NULL || then.is_linked()) {
      then.Bind();
      Visit(node->then_statement());
    }

  } else if (has_else_stm) {
    Comment cmnt(masm_, "[ IfElse");
    ASSERT(!has_then_stm);
    JumpTarget else_;
    // if (!cond)
    LoadCondition(node->condition(), &exit, &else_, true);
    if (frame_ != NULL) {
      Branch(true, &exit);
    }
    // else
    if (frame_ != NULL || else_.is_linked()) {
      else_.Bind();
      Visit(node->else_statement());
    }

  } else {
    Comment cmnt(masm_, "[ If");
    ASSERT(!has_then_stm && !has_else_stm);
    // if (cond)
    LoadCondition(node->condition(), &exit, &exit, false);
    if (frame_ != NULL) {
      if (has_cc()) {
        cc_reg_ = al;
      } else {
        frame_->Drop();
      }
    }
  }

  // end
  if (exit.is_linked()) {
    exit.Bind();
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
  Comment cmnt(masm_, "[ ContinueStatement");
  CodeForStatementPosition(node);
  node->target()->continue_target()->Jump();
}


void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
  Comment cmnt(masm_, "[ BreakStatement");
  CodeForStatementPosition(node);
  node->target()->break_target()->Jump();
}


void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
  frame_->SpillAll();
  Comment cmnt(masm_, "[ ReturnStatement");

  CodeForStatementPosition(node);
  Load(node->expression());
  if (function_return_is_shadowed_) {
    frame_->EmitPop(r0);
    function_return_.Jump();
  } else {
    // Pop the result from the frame and prepare the frame for
    // returning, thus making it easier to merge.
    frame_->PopToR0();
    frame_->PrepareForReturn();
    if (function_return_.is_bound()) {
      // If the function return label is already bound we reuse the
      // code by jumping to the return site.
      function_return_.Jump();
    } else {
      function_return_.Bind();
      GenerateReturnSequence();
    }
  }
}


void CodeGenerator::GenerateReturnSequence() {
  if (FLAG_trace) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns the parameter as it is.
    frame_->EmitPush(r0);
    frame_->CallRuntime(Runtime::kTraceExit, 1);
  }

#ifdef DEBUG
  // Add a label for checking the size of the code used for returning.
  Label check_exit_codesize;
  masm_->bind(&check_exit_codesize);
#endif
  // Make sure that the constant pool is not emitted inside of the return
  // sequence.
  { Assembler::BlockConstPoolScope block_const_pool(masm_);
    // Tear down the frame which will restore the caller's frame pointer and
    // the link register.
    frame_->Exit();

    // Here we use masm_-> instead of the __ macro to prevent the code
    // coverage tool from instrumenting, as we rely on the code size here.
    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
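    // Illustrative arithmetic (invented example): for a function with
    // two declared parameters, sp_delta = (2 + 1) * kPointerSize = 12
    // bytes, i.e. the parameters plus the receiver are popped from the
    // caller's stack on return.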
    masm_->add(sp, sp, Operand(sp_delta));
    masm_->Jump(lr);
    DeleteFrame();

#ifdef DEBUG
    // Check that the size of the code used for returning matches what is
    // expected by the debugger. If the sp_delta above cannot be encoded in
    // the add instruction, the add will generate two instructions.
    int return_sequence_length =
        masm_->InstructionsGeneratedSince(&check_exit_codesize);
    CHECK(return_sequence_length ==
          Assembler::kJSReturnSequenceInstructions ||
          return_sequence_length ==
          Assembler::kJSReturnSequenceInstructions + 1);
#endif
  }
}


void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ WithEnterStatement");
  CodeForStatementPosition(node);
  Load(node->expression());
  if (node->is_catch_block()) {
    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
  } else {
    frame_->CallRuntime(Runtime::kPushContext, 1);
  }
#ifdef DEBUG
  JumpTarget verified_true;
  __ cmp(r0, cp);
  verified_true.Branch(eq);
  __ stop("PushContext: r0 is expected to be the same as cp");
  verified_true.Bind();
#endif
  // Update context local.
  __ str(cp, frame_->Context());
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ WithExitStatement");
  CodeForStatementPosition(node);
  // Pop context.
  __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
  // Update context local.
  __ str(cp, frame_->Context());
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ SwitchStatement");
  CodeForStatementPosition(node);
  node->break_target()->SetExpectedHeight();

  Load(node->tag());

  JumpTarget next_test;
  JumpTarget fall_through;
  JumpTarget default_entry;
  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
  ZoneList<CaseClause*>* cases = node->cases();
  int length = cases->length();
  CaseClause* default_clause = NULL;
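
  // Illustrative example (invented) of why default_entry and
  // default_exit are needed: the default clause may sit in the middle
  // of the case list but is compiled last, so fall-through into it and
  // out of it has to be routed explicitly:
  //
  //   switch (x) {
  //     case 0:   // falls through into the default clause
  //     default:  // falls through into case 1
  //     case 1:
  //       break;
  //   }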

  for (int i = 0; i < length; i++) {
    CaseClause* clause = cases->at(i);
    if (clause->is_default()) {
      // Remember the default clause and compile it at the end.
      default_clause = clause;
      continue;
    }

    Comment cmnt(masm_, "[ Case clause");
    // Compile the test.
    next_test.Bind();
    next_test.Unuse();
    // Duplicate TOS.
    frame_->Dup();
    Comparison(eq, NULL, clause->label(), true);
    Branch(false, &next_test);

    // Before entering the body from the test, remove the switch value from
    // the stack.
    frame_->Drop();

    // Label the body so that fall through is enabled.
    if (i > 0 && cases->at(i - 1)->is_default()) {
      default_exit.Bind();
    } else {
      fall_through.Bind();
      fall_through.Unuse();
    }
    VisitStatements(clause->statements());

    // If control flow can fall through from the body, jump to the next body
    // or the end of the statement.
    if (frame_ != NULL) {
      if (i < length - 1 && cases->at(i + 1)->is_default()) {
        default_entry.Jump();
      } else {
        fall_through.Jump();
      }
    }
  }

  // The final "test" removes the switch value.
  next_test.Bind();
  frame_->Drop();

  // If there is a default clause, compile it.
  if (default_clause != NULL) {
    Comment cmnt(masm_, "[ Default clause");
    default_entry.Bind();
    VisitStatements(default_clause->statements());
    // If control flow can fall out of the default and there is a case after
    // it, jump to that case's body.
    if (frame_ != NULL && default_exit.is_bound()) {
      default_exit.Jump();
    }
  }

  if (fall_through.is_linked()) {
    fall_through.Bind();
  }

  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ DoWhileStatement");
  CodeForStatementPosition(node);
  node->break_target()->SetExpectedHeight();
  JumpTarget body(JumpTarget::BIDIRECTIONAL);
  IncrementLoopNesting();

  // Label the top of the loop for the backward CFG edge.  If the test
  // is always true we can use the continue target, and if the test is
  // always false there is no need.
  ConditionAnalysis info = AnalyzeCondition(node->cond());
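  // Illustrative classification (invented examples):
  //   do { ... } while (true);    -> ALWAYS_TRUE
  //   do { ... } while (false);   -> ALWAYS_FALSE
  //   do { ... } while (i < n);   -> DONT_KNOW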
  switch (info) {
    case ALWAYS_TRUE:
      node->continue_target()->SetExpectedHeight();
      node->continue_target()->Bind();
      break;
    case ALWAYS_FALSE:
      node->continue_target()->SetExpectedHeight();
      break;
    case DONT_KNOW:
      node->continue_target()->SetExpectedHeight();
      body.Bind();
      break;
  }

  CheckStack();  // TODO(1222600): ignore if body contains calls.
  Visit(node->body());

  // Compile the test.
  switch (info) {
    case ALWAYS_TRUE:
      // If control can fall off the end of the body, jump back to the
      // top.
      if (has_valid_frame()) {
        node->continue_target()->Jump();
      }
      break;
    case ALWAYS_FALSE:
      // If we have a continue in the body, we only have to bind its
      // jump target.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      break;
    case DONT_KNOW:
      // We have to compile the test expression if it can be reached by
      // control flow falling out of the body or via continue.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      if (has_valid_frame()) {
        Comment cmnt(masm_, "[ DoWhileCondition");
        CodeForDoWhileConditionPosition(node);
        LoadCondition(node->cond(), &body, node->break_target(), true);
        if (has_valid_frame()) {
          // An invalid frame here would indicate that control did not
          // fall out of the test expression.
          Branch(true, &body);
        }
      }
      break;
  }

  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  DecrementLoopNesting();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ WhileStatement");
  CodeForStatementPosition(node);

  // If the test is never true and has no side effects, there is no need
  // to compile the test or body.
  ConditionAnalysis info = AnalyzeCondition(node->cond());
  if (info == ALWAYS_FALSE) return;

  node->break_target()->SetExpectedHeight();
  IncrementLoopNesting();

  // Label the top of the loop with the continue target for the backward
  // CFG edge.
  node->continue_target()->SetExpectedHeight();
  node->continue_target()->Bind();

  if (info == DONT_KNOW) {
    JumpTarget body(JumpTarget::BIDIRECTIONAL);
    LoadCondition(node->cond(), &body, node->break_target(), true);
    if (has_valid_frame()) {
      // A NULL frame indicates that control did not fall out of the
      // test expression.
      Branch(false, node->break_target());
    }
    if (has_valid_frame() || body.is_linked()) {
      body.Bind();
    }
  }

  if (has_valid_frame()) {
    CheckStack();  // TODO(1222600): ignore if body contains calls.
    Visit(node->body());

    // If control flow can fall out of the body, jump back to the top.
    if (has_valid_frame()) {
      node->continue_target()->Jump();
    }
  }
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  DecrementLoopNesting();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitForStatement(ForStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ ForStatement");
  CodeForStatementPosition(node);
  if (node->init() != NULL) {
    Visit(node->init());
  }

  // If the test is never true there is no need to compile the test or
  // body.
  ConditionAnalysis info = AnalyzeCondition(node->cond());
  if (info == ALWAYS_FALSE) return;

  node->break_target()->SetExpectedHeight();
  IncrementLoopNesting();

  // We know that the loop index is a smi if it is not modified in the
  // loop body and it is checked against a constant limit in the loop
  // condition.  In this case, we reset the static type information of the
  // loop index to smi before compiling the body, the update expression, and
  // the bottom check of the loop condition.
  TypeInfoCodeGenState type_info_scope(this,
                                       node->is_fast_smi_loop() ?
                                           node->loop_variable()->slot() :
                                           NULL,
                                       TypeInfo::Smi());
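  // Illustrative example (invented): in
  //
  //   for (var i = 0; i < 100; i++) { sum += a[i]; }
  //
  // the loop variable i is only written by the update expression and is
  // compared against a constant limit, so it can be treated as a smi
  // while compiling the body, the update, and the bottom check.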

  // If there is no update statement, label the top of the loop with the
  // continue target, otherwise with the loop target.
  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
  if (node->next() == NULL) {
    node->continue_target()->SetExpectedHeight();
    node->continue_target()->Bind();
  } else {
    node->continue_target()->SetExpectedHeight();
    loop.Bind();
  }

  // If the test is always true, there is no need to compile it.
  if (info == DONT_KNOW) {
    JumpTarget body;
    LoadCondition(node->cond(), &body, node->break_target(), true);
    if (has_valid_frame()) {
      Branch(false, node->break_target());
    }
    if (has_valid_frame() || body.is_linked()) {
      body.Bind();
    }
  }

  if (has_valid_frame()) {
    CheckStack();  // TODO(1222600): ignore if body contains calls.
    Visit(node->body());

    if (node->next() == NULL) {
      // If there is no update statement and control flow can fall out
      // of the loop, jump directly to the continue label.
      if (has_valid_frame()) {
        node->continue_target()->Jump();
      }
    } else {
      // If there is an update statement and control flow can reach it
      // via falling out of the body of the loop or continuing, we
      // compile the update statement.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      if (has_valid_frame()) {
        // Record the source position of the statement, because this
        // code, which comes after the code for the body, belongs to the
        // loop statement and not the body.
        CodeForStatementPosition(node);
        Visit(node->next());
        loop.Jump();
      }
    }
  }
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  DecrementLoopNesting();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitForInStatement(ForInStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Comment cmnt(masm_, "[ ForInStatement");
  CodeForStatementPosition(node);

  JumpTarget primitive;
  JumpTarget jsobject;
  JumpTarget fixed_array;
  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
  JumpTarget end_del_check;
  JumpTarget exit;

  // Get the object to enumerate over (converted to JSObject).
  Load(node->enumerable());

  // Both SpiderMonkey and kjs ignore null and undefined in contrast
  // to the specification.  12.6.4 mandates a call to ToObject.
  frame_->EmitPop(r0);
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  exit.Branch(eq);
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(r0, ip);
  exit.Branch(eq);

  // Stack layout in body:
  // [iteration counter (Smi)]
  // [length of array]
  // [FixedArray]
  // [Map or 0]
  // [Object]

  // Check if enumerable is already a JSObject.
  __ tst(r0, Operand(kSmiTagMask));
  primitive.Branch(eq);
  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
  jsobject.Branch(hs);

  primitive.Bind();
  frame_->EmitPush(r0);
  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);

  jsobject.Bind();
  // Get the set of properties (as a FixedArray or Map).
  // r0: value to be iterated over
  frame_->EmitPush(r0);  // Push the object being iterated over.

  // Check cache validity in generated code. This is a fast case for
  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
  // guarantee cache validity, call the runtime system to check cache
  // validity or get the property names in a fixed array.
  JumpTarget call_runtime;
  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
  JumpTarget check_prototype;
  JumpTarget use_cache;
  __ mov(r1, Operand(r0));
  loop.Bind();
  // Check that there are no elements.
  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
  __ cmp(r2, r4);
  call_runtime.Branch(ne);
  // Check that instance descriptors are not empty so that we can
  // check for an enum cache.  Leave the map in r3 for the subsequent
  // prototype load.
  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
  __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
  __ cmp(r2, ip);
  call_runtime.Branch(eq);
  // Check that there is an enum cache in the non-empty instance
  // descriptors.  This is the case if the next enumeration index
  // field does not contain a smi.
  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
  __ tst(r2, Operand(kSmiTagMask));
  call_runtime.Branch(eq);
  // For all objects but the receiver, check that the cache is empty.
  // r4: empty fixed array root.
  __ cmp(r1, r0);
  check_prototype.Branch(eq);
  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
  __ cmp(r2, r4);
  call_runtime.Branch(ne);
  check_prototype.Bind();
  // Load the prototype from the map and loop if non-null.
  __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(r1, ip);
  loop.Branch(ne);
  // The enum cache is valid.  Load the map of the object being
  // iterated over and use the cache for the iteration.
  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  use_cache.Jump();

  call_runtime.Bind();
  // Call the runtime to get the property names for the object.
  frame_->EmitPush(r0);  // push the object (slot 4) for the runtime call
  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);

  // If we got a map from the runtime call, we can do a fast
  // modification check. Otherwise, we got a fixed array, and we have
  // to do a slow check.
  // r0: map or fixed array (result from call to
  // Runtime::kGetPropertyNamesFast)
  __ mov(r2, Operand(r0));
  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  fixed_array.Branch(ne);

  use_cache.Bind();
  // Get the enum cache.
  // r0: map (either the result from a call to
  // Runtime::kGetPropertyNamesFast or has been fetched directly from
  // the object)
  __ mov(r1, Operand(r0));
  __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
  __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
  __ ldr(r2,
         FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));

  frame_->EmitPush(r0);  // map
  frame_->EmitPush(r2);  // enum cache bridge cache
  __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
  frame_->EmitPush(r0);
  __ mov(r0, Operand(Smi::FromInt(0)));
  frame_->EmitPush(r0);
  entry.Jump();

  fixed_array.Bind();
  __ mov(r1, Operand(Smi::FromInt(0)));
  frame_->EmitPush(r1);  // insert 0 in place of Map
  frame_->EmitPush(r0);

  // Push the length of the array and the initial index onto the stack.
  __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
  frame_->EmitPush(r0);
  __ mov(r0, Operand(Smi::FromInt(0)));  // init index
  frame_->EmitPush(r0);

  // Condition.
  entry.Bind();
  // sp[0] : index
  // sp[1] : array/enum cache length
  // sp[2] : array or enum cache
  // sp[3] : 0 or map
  // sp[4] : enumerable
  // Grab the current frame's height for the break and continue
  // targets only after all the state is pushed on the frame.
  node->break_target()->SetExpectedHeight();
  node->continue_target()->SetExpectedHeight();

  // Load the current count to r0, load the length to r1.
  __ Ldrd(r0, r1, frame_->ElementAt(0));
  __ cmp(r0, r1);  // compare to the array length
  node->break_target()->Branch(hs);

  // Get the i'th entry of the array.
  __ ldr(r2, frame_->ElementAt(2));
  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));

  // Get Map or 0.
  __ ldr(r2, frame_->ElementAt(3));
  // Check if this (still) matches the map of the enumerable.
  // If not, we have to filter the key.
  __ ldr(r1, frame_->ElementAt(4));
  __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ cmp(r1, Operand(r2));
  end_del_check.Branch(eq);
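  // Illustrative example (invented): the map check above fails if the
  // loop body mutates the enumerable, e.g.
  //
  //   for (var p in o) { delete o.x; }
  //
  // in which case the FILTER_KEY call below re-checks that the current
  // key is still a valid property of o.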

  // Convert the entry to a string (or null if it isn't a property anymore).
  __ ldr(r0, frame_->ElementAt(4));  // push enumerable
  frame_->EmitPush(r0);
  frame_->EmitPush(r3);  // push entry
  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
  __ mov(r3, Operand(r0));

  // If the property has been removed while iterating, we just skip it.
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(r3, ip);
  node->continue_target()->Branch(eq);

  end_del_check.Bind();
  // Store the entry in the 'each' expression and take another spin in the
  // loop.  r3: i'th entry of the enum cache (or string thereof).
  frame_->EmitPush(r3);  // push entry
  { Reference each(this, node->each());
    if (!each.is_illegal()) {
      if (each.size() > 0) {
        __ ldr(r0, frame_->ElementAt(each.size()));
        frame_->EmitPush(r0);
        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
        frame_->Drop(2);
      } else {
        // If the reference was to a slot we rely on the convenient property
        // that it doesn't matter whether a value (e.g., r3 pushed above) is
        // right on top of or right underneath a zero-sized reference.
        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
        frame_->Drop();
      }
    }
  }
  // Body.
  CheckStack();  // TODO(1222600): ignore if body contains calls.
  Visit(node->body());

  // Next.  Reestablish a spilled frame in case we are coming here via
  // a continue in the body.
  node->continue_target()->Bind();
  frame_->SpillAll();
  frame_->EmitPop(r0);
  __ add(r0, r0, Operand(Smi::FromInt(1)));
  frame_->EmitPush(r0);
  entry.Jump();

  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
  // any frame.
  node->break_target()->Bind();
  frame_->Drop(5);

  // Exit.
  exit.Bind();
  node->continue_target()->Unuse();
  node->break_target()->Unuse();
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Comment cmnt(masm_, "[ TryCatchStatement");
  CodeForStatementPosition(node);

  JumpTarget try_block;
  JumpTarget exit;

  try_block.Call();
  // --- Catch block ---
  frame_->EmitPush(r0);

  // Store the caught exception in the catch variable.
  Variable* catch_var = node->catch_var()->var();
  ASSERT(catch_var != NULL && catch_var->slot() != NULL);
  StoreToSlot(catch_var->slot(), NOT_CONST_INIT);

  // Remove the exception from the stack.
  frame_->Drop();

  VisitStatements(node->catch_block()->statements());
  if (frame_ != NULL) {
    exit.Jump();
  }


  // --- Try block ---
  try_block.Bind();

  frame_->PushTryHandler(TRY_CATCH_HANDLER);
  int handler_height = frame_->height();

  // Shadow the labels for all escapes from the try block, including
  // returns. During shadowing, the original label is hidden as the
  // LabelShadow and operations on the original actually affect the
  // shadowing label.
  //
  // We should probably try to unify the escaping labels and the return
  // label.
  int nof_escapes = node->escaping_targets()->length();
  List<ShadowTarget*> shadows(1 + nof_escapes);

  // Add the shadow target for the function return.
  static const int kReturnShadowIndex = 0;
  shadows.Add(new ShadowTarget(&function_return_));
  bool function_return_was_shadowed = function_return_is_shadowed_;
  function_return_is_shadowed_ = true;
  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);

  // Add the remaining shadow targets.
  for (int i = 0; i < nof_escapes; i++) {
    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
  }

  // Generate code for the statements in the try block.
  VisitStatements(node->try_block()->statements());

  // Stop the introduced shadowing and count the number of required unlinks.
  // After shadowing stops, the original labels are unshadowed and the
  // LabelShadows represent the formerly shadowing labels.
  bool has_unlinks = false;
  for (int i = 0; i < shadows.length(); i++) {
    shadows[i]->StopShadowing();
    has_unlinks = has_unlinks || shadows[i]->is_linked();
  }
  function_return_is_shadowed_ = function_return_was_shadowed;

  // Get an external reference to the handler address.
  ExternalReference handler_address(Top::k_handler_address);

  // If we can fall off the end of the try block, unlink from try chain.
  if (has_valid_frame()) {
    // The next handler address is on top of the frame.  Unlink from
    // the handler list and drop the rest of this handler from the
    // frame.
    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
    frame_->EmitPop(r1);
    __ mov(r3, Operand(handler_address));
    __ str(r1, MemOperand(r3));
    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
    if (has_unlinks) {
      exit.Jump();
    }
  }

  // Generate unlink code for the (formerly) shadowing labels that have been
  // jumped to.  Deallocate each shadow target.
  for (int i = 0; i < shadows.length(); i++) {
    if (shadows[i]->is_linked()) {
      // Unlink from the try chain.
      shadows[i]->Bind();
      // Because we can be jumping here (to spilled code) from unspilled
      // code, we need to reestablish a spilled frame at this block.
      frame_->SpillAll();

      // Reload sp from the top handler, because some statements that we
      // break from (e.g., for...in) may have left stuff on the stack.
      __ mov(r3, Operand(handler_address));
      __ ldr(sp, MemOperand(r3));
      frame_->Forget(frame_->height() - handler_height);

      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
      frame_->EmitPop(r1);
      __ str(r1, MemOperand(r3));
      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);

      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
        frame_->PrepareForReturn();
      }
      shadows[i]->other_target()->Jump();
    }
  }

  exit.Bind();
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Comment cmnt(masm_, "[ TryFinallyStatement");
  CodeForStatementPosition(node);

  // State: Used to keep track of the reason for entering the finally
  // block. Should probably be extended to hold information for
  // break/continue from within the try block.
  enum { FALLING, THROWING, JUMPING };
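  //
  // Invented examples of the three states:
  //   FALLING:  try { x = 1;    } finally { ... }  // normal completion
  //   THROWING: try { throw e;  } finally { ... }  // via the handler
  //   JUMPING:  try { return x; } finally { ... }  // via a shadow target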

  JumpTarget try_block;
  JumpTarget finally_block;

  try_block.Call();

  frame_->EmitPush(r0);  // save exception object on the stack
  // In case of thrown exceptions, this is where we continue.
  __ mov(r2, Operand(Smi::FromInt(THROWING)));
  finally_block.Jump();

  // --- Try block ---
  try_block.Bind();

  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
  int handler_height = frame_->height();

  // Shadow the labels for all escapes from the try block, including
  // returns.  Shadowing hides the original label as the LabelShadow and
  // operations on the original actually affect the shadowing label.
  //
  // We should probably try to unify the escaping labels and the return
  // label.
  int nof_escapes = node->escaping_targets()->length();
  List<ShadowTarget*> shadows(1 + nof_escapes);

  // Add the shadow target for the function return.
  static const int kReturnShadowIndex = 0;
  shadows.Add(new ShadowTarget(&function_return_));
  bool function_return_was_shadowed = function_return_is_shadowed_;
  function_return_is_shadowed_ = true;
  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);

  // Add the remaining shadow targets.
  for (int i = 0; i < nof_escapes; i++) {
    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
  }

  // Generate code for the statements in the try block.
  VisitStatements(node->try_block()->statements());

  // Stop the introduced shadowing and count the number of required unlinks.
  // After shadowing stops, the original labels are unshadowed and the
  // LabelShadows represent the formerly shadowing labels.
  int nof_unlinks = 0;
  for (int i = 0; i < shadows.length(); i++) {
    shadows[i]->StopShadowing();
    if (shadows[i]->is_linked()) nof_unlinks++;
  }
  function_return_is_shadowed_ = function_return_was_shadowed;

  // Get an external reference to the handler address.
  ExternalReference handler_address(Top::k_handler_address);

  // If we can fall off the end of the try block, unlink from the try
  // chain and set the state on the frame to FALLING.
  if (has_valid_frame()) {
    // The next handler address is on top of the frame.
    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
    frame_->EmitPop(r1);
    __ mov(r3, Operand(handler_address));
    __ str(r1, MemOperand(r3));
    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);

    // Fake a top of stack value (unneeded when FALLING) and set the
    // state in r2, then jump around the unlink blocks if any.
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
    frame_->EmitPush(r0);
    __ mov(r2, Operand(Smi::FromInt(FALLING)));
    if (nof_unlinks > 0) {
      finally_block.Jump();
    }
  }

  // Generate code to unlink and set the state for the (formerly)
  // shadowing targets that have been jumped to.
  for (int i = 0; i < shadows.length(); i++) {
    if (shadows[i]->is_linked()) {
      // If we have come from the shadowed return, the return value is
      // in (a non-refcounted reference to) r0.  We must preserve it
      // until it is pushed.
      //
      // Because we can be jumping here (to spilled code) from
      // unspilled code, we need to reestablish a spilled frame at
      // this block.
      shadows[i]->Bind();
      frame_->SpillAll();

      // Reload sp from the top handler, because some statements that
      // we break from (e.g., for...in) may have left stuff on the
      // stack.
      __ mov(r3, Operand(handler_address));
      __ ldr(sp, MemOperand(r3));
      frame_->Forget(frame_->height() - handler_height);

      // Unlink this handler and drop it from the frame.  The next
      // handler address is currently on top of the frame.
      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
      frame_->EmitPop(r1);
      __ str(r1, MemOperand(r3));
      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);

      if (i == kReturnShadowIndex) {
        // If this label shadowed the function return, materialize the
        // return value on the stack.
        frame_->EmitPush(r0);
      } else {
        // Fake TOS for targets that shadowed breaks and continues.
        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
        frame_->EmitPush(r0);
      }
      __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
      if (--nof_unlinks > 0) {
        // If this is not the last unlink block, jump around the next.
        finally_block.Jump();
      }
    }
  }

  // --- Finally block ---
  finally_block.Bind();

  // Push the state on the stack.
  frame_->EmitPush(r2);

  // We keep two elements on the stack - the (possibly faked) result
  // and the state - while evaluating the finally block.
  //
  // Generate code for the statements in the finally block.
  VisitStatements(node->finally_block()->statements());

  if (has_valid_frame()) {
    // Restore state and return value or faked TOS.
    frame_->EmitPop(r2);
    frame_->EmitPop(r0);
  }

  // Generate code to jump to the right destination for all used
  // formerly shadowing targets.  Deallocate each shadow target.
  for (int i = 0; i < shadows.length(); i++) {
    if (has_valid_frame() && shadows[i]->is_bound()) {
      JumpTarget* original = shadows[i]->other_target();
      __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
        JumpTarget skip;
        skip.Branch(ne);
        frame_->PrepareForReturn();
        original->Jump();
        skip.Bind();
      } else {
        original->Branch(eq);
      }
    }
  }

  if (has_valid_frame()) {
    // Check if we need to rethrow the exception.
    JumpTarget exit;
    __ cmp(r2, Operand(Smi::FromInt(THROWING)));
    exit.Branch(ne);

    // Rethrow exception.
    frame_->EmitPush(r0);
    frame_->CallRuntime(Runtime::kReThrow, 1);

    // Done.
    exit.Bind();
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ DebuggerStatement");
  CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
  frame_->DebugBreak();
#endif
  // Ignore the return value.
  ASSERT(frame_->height() == original_height);
}


void CodeGenerator::InstantiateFunction(
    Handle<SharedFunctionInfo> function_info) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literal cloning.
  if (scope()->is_function_scope() && function_info->num_literals() == 0) {
    FastNewClosureStub stub;
    frame_->EmitPush(Operand(function_info));
    frame_->SpillAll();
    frame_->CallStub(&stub, 1);
    frame_->EmitPush(r0);
  } else {
    // Create a new closure.
    frame_->EmitPush(cp);
    frame_->EmitPush(Operand(function_info));
    frame_->CallRuntime(Runtime::kNewClosure, 2);
    frame_->EmitPush(r0);
  }
}


void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ FunctionLiteral");

  // Build the function info and instantiate it.
  Handle<SharedFunctionInfo> function_info =
      Compiler::BuildFunctionInfo(node, script(), this);
  // Check for stack-overflow exception.
  if (HasStackOverflow()) {
    ASSERT(frame_->height() == original_height);
    return;
  }
  InstantiateFunction(function_info);
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitSharedFunctionInfoLiteral(
    SharedFunctionInfoLiteral* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
  InstantiateFunction(node->shared_function_info());
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitConditional(Conditional* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Conditional");
  JumpTarget then;
  JumpTarget else_;
  LoadCondition(node->condition(), &then, &else_, true);
  if (has_valid_frame()) {
    Branch(false, &else_);
  }
  if (has_valid_frame() || then.is_linked()) {
    then.Bind();
    Load(node->then_expression());
  }
  if (else_.is_linked()) {
    JumpTarget exit;
    if (has_valid_frame()) exit.Jump();
    else_.Bind();
    Load(node->else_expression());
    if (exit.is_linked()) exit.Bind();
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
  if (slot->type() == Slot::LOOKUP) {
    ASSERT(slot->var()->is_dynamic());

    // JumpTargets do not yet support merging frames, so the frame must be
    // spilled when jumping to these targets.
    JumpTarget slow;
    JumpTarget done;

    // Generate fast case for loading from slots that correspond to
    // local/global variables or arguments unless they are shadowed by
    // eval-introduced bindings.
    EmitDynamicLoadFromSlotFastCase(slot,
                                    typeof_state,
                                    &slow,
                                    &done);

    slow.Bind();
    frame_->EmitPush(cp);
    frame_->EmitPush(Operand(slot->var()->name()));

    if (typeof_state == INSIDE_TYPEOF) {
      frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
    } else {
      frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
    }

    done.Bind();
    frame_->EmitPush(r0);

  } else {
    Register scratch = VirtualFrame::scratch0();
    TypeInfo info = type_info(slot);
    frame_->EmitPush(SlotOperand(slot, scratch), info);

    if (slot->var()->mode() == Variable::CONST) {
      // Const slots may contain 'the hole' value (the constant hasn't been
      // initialized yet) which needs to be converted into the 'undefined'
      // value.
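      // Illustrative example (invented): in
      //
      //   var f = function() { alert(c); const c = 1; };
      //
      // the use of c precedes its initialization, so the slot still
      // holds the hole and the load must yield 'undefined' here.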
2971      Comment cmnt(masm_, "[ Unhole const");
2972      Register tos = frame_->PopToRegister();
2973      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2974      __ cmp(tos, ip);
2975      __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
2976      frame_->EmitPush(tos);
2977    }
2978  }
2979}
2980
2981
2982void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
2983                                                  TypeofState state) {
2984  VirtualFrame::RegisterAllocationScope scope(this);
2985  LoadFromSlot(slot, state);
2986
2987  // Bail out quickly if we're not using lazy arguments allocation.
2988  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
2989
2990  // ... or if the slot isn't a non-parameter arguments slot.
2991  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
2992
2993  // Load the loaded value from the stack into a register but leave it on the
2994  // stack.
2995  Register tos = frame_->Peek();
2996
2997  // If the loaded value is the sentinel that indicates that we
2998  // haven't loaded the arguments object yet, we need to do it now.
2999  JumpTarget exit;
3000  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3001  __ cmp(tos, ip);
3002  exit.Branch(ne);
3003  frame_->Drop();
3004  StoreArgumentsObject(false);
3005  exit.Bind();
3006}
3007
3008
3009void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
3010  ASSERT(slot != NULL);
3011  VirtualFrame::RegisterAllocationScope scope(this);
3012  if (slot->type() == Slot::LOOKUP) {
3013    ASSERT(slot->var()->is_dynamic());
3014
3015    // For now, just do a runtime call.
3016    frame_->EmitPush(cp);
3017    frame_->EmitPush(Operand(slot->var()->name()));
3018
3019    if (init_state == CONST_INIT) {
3020      // Same as the case for a normal store, but ignores attribute
3021      // (e.g. READ_ONLY) of context slot so that we can initialize
3022      // const properties (introduced via eval("const foo = (some
3023      // expr);")). Also, uses the current function context instead of
3024      // the top context.
3025      //
3026      // Note that we must declare the foo upon entry of eval(), via a
3027      // context slot declaration, but we cannot initialize it at the
3028      // same time, because the const declaration may be at the end of
3029      // the eval code (sigh...) and the const variable may have been
3030      // used before (where its value is 'undefined'). Thus, we can only
3031      // do the initialization when we actually encounter the expression
3032      // and when the expression operands are defined and valid, and
3033      // thus we need the split into 2 operations: declaration of the
3034      // context slot followed by initialization.
3035      frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
3036    } else {
3037      frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
3038    }
3039    // Storing a variable must keep the (new) value on the expression
3040    // stack. This is necessary for compiling assignment expressions.
3041    frame_->EmitPush(r0);
3042
3043  } else {
3044    ASSERT(!slot->var()->is_dynamic());
3045    Register scratch = VirtualFrame::scratch0();
3046    Register scratch2 = VirtualFrame::scratch1();
3047
3048    // The frame must be spilled when branching to this target.
3049    JumpTarget exit;
3050
3051    if (init_state == CONST_INIT) {
3052      ASSERT(slot->var()->mode() == Variable::CONST);
3053      // Only the first const initialization must be executed (the slot
3054      // still contains 'the hole' value). When the assignment is
3055      // executed, the code is identical to a normal store (see below).
3056      Comment cmnt(masm_, "[ Init const");
3057      __ ldr(scratch, SlotOperand(slot, scratch));
3058      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3059      __ cmp(scratch, ip);
3060      exit.Branch(ne);
3061    }
3062
3063    // We must execute the store.  Storing a variable must keep the
3064    // (new) value on the stack. This is necessary for compiling
3065    // assignment expressions.
3066    //
3067    // Note: We will reach here even with slot->var()->mode() ==
3068    // Variable::CONST because of const declarations which will
3069    // initialize consts to 'the hole' value and by doing so, end up
3070    // calling this code.  r2 may be loaded with context; used below in
3071    // RecordWrite.
3072    Register tos = frame_->Peek();
3073    __ str(tos, SlotOperand(slot, scratch));
3074    if (slot->type() == Slot::CONTEXT) {
3075      // Skip write barrier if the written value is a smi.
      __ tst(tos, Operand(kSmiTagMask));
      // We don't use tos any more after here.
      exit.Branch(eq);
      // scratch is loaded with context when calling SlotOperand above.
      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
      // We need an extra register.  Until we have a way to do that in the
      // virtual frame we will cheat and ask for a free TOS register.
      Register scratch3 = frame_->GetTOSRegister();
      __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
    }
    // If we definitely did not jump over the assignment, we do not need
    // to bind the exit label.  Doing so can defeat peephole
    // optimization.
    if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
      exit.Bind();
    }
  }
}


void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                                      TypeofState typeof_state,
                                                      JumpTarget* slow) {
  // Check that no extension objects have been created by calls to
  // eval from the current scope to the global scope.
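  // An extension object holds dynamically introduced bindings, e.g.
  // (illustrative) after eval("var x = 1") has run in an enclosing scope;
  // if any context in the chain has one, we take the slow path.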
  Register tmp = frame_->scratch0();
  Register tmp2 = frame_->scratch1();
  Register context = cp;
  Scope* s = scope();
  while (s != NULL) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        frame_->SpillAll();
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      // Load next context in chain.
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
    // If no outer scope calls eval, we do not need to check more
    // context extensions.
    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
    s = s->outer_scope();
  }

  if (s->is_eval_scope()) {
    frame_->SpillAll();
    Label next, fast;
    __ Move(tmp, context);
    __ bind(&next);
    // Terminate at global context.
    __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    __ cmp(tmp2, ip);
    __ b(eq, &fast);
    // Check that extension is NULL.
    __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
    __ tst(tmp2, tmp2);
    slow->Branch(ne);
    // Load next context in chain.
    __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
    __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
    __ b(&next);
    __ bind(&fast);
  }

  // Load the global object.
  LoadGlobal();
  // Setup the name register and call load IC.
  frame_->CallLoadIC(slot->var()->name(),
                     typeof_state == INSIDE_TYPEOF
                         ? RelocInfo::CODE_TARGET
                         : RelocInfo::CODE_TARGET_CONTEXT);
}


void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
                                                    TypeofState typeof_state,
                                                    JumpTarget* slow,
                                                    JumpTarget* done) {
  // Generate fast-case code for variables that might be shadowed by
  // eval-introduced variables.  Eval is used a lot without
  // introducing variables.  In those cases, we do not want to
  // perform a runtime call for all variables in the scope
  // containing the eval.
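  // Illustrative JavaScript example (not from the original comments):
  //
  //   function f(a) { eval("1"); return a; }
  //
  // Here 'a' is DYNAMIC_LOCAL: unless the eval introduces a shadowing 'a'
  // we can load the parameter directly, falling back to the runtime only
  // when an extension object is found.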
  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
    LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
    frame_->SpillAll();
    done->Jump();

  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
    frame_->SpillAll();
    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
    if (potential_slot != NULL) {
      // Generate fast case for locals that rewrite to slots.
      __ ldr(r0,
             ContextSlotOperandCheckExtensions(potential_slot,
                                               r1,
                                               r2,
                                               slow));
      if (potential_slot->var()->mode() == Variable::CONST) {
        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
        __ cmp(r0, ip);
        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
      }
      done->Jump();
    } else if (rewrite != NULL) {
      // Generate fast case for argument loads.
      Property* property = rewrite->AsProperty();
      if (property != NULL) {
        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
        Literal* key_literal = property->key()->AsLiteral();
        if (obj_proxy != NULL &&
            key_literal != NULL &&
            obj_proxy->IsArguments() &&
            key_literal->handle()->IsSmi()) {
          // Load arguments object if there are no eval-introduced
          // variables. Then load the argument from the arguments
          // object using keyed load.
          __ ldr(r0,
                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
                                                   r1,
                                                   r2,
                                                   slow));
          frame_->EmitPush(r0);
          __ mov(r1, Operand(key_literal->handle()));
          frame_->EmitPush(r1);
          EmitKeyedLoad();
          done->Jump();
        }
      }
    }
  }
}


void CodeGenerator::VisitSlot(Slot* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Slot");
  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ VariableProxy");

  Variable* var = node->var();
  Expression* expr = var->rewrite();
  if (expr != NULL) {
    Visit(expr);
  } else {
    ASSERT(var->is_global());
    Reference ref(this, node);
    ref.GetValue();
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitLiteral(Literal* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Literal");
  Register reg = frame_->GetTOSRegister();
  bool is_smi = node->handle()->IsSmi();
  __ mov(reg, Operand(node->handle()));
  frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ RegExp Literal");
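  // JavaScript example: 'x = /ab+c/gi'  // The literal is materialized
  // lazily and cached in the function's literals array (see below).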

  Register tmp = VirtualFrame::scratch0();
  // Free up a TOS register that can be used to push the literal.
  Register literal = frame_->GetTOSRegister();

  // Retrieve the literal array and check the allocated entry.

  // Load the function of this activation.
  __ ldr(tmp, frame_->Function());

  // Load the literals array of the function.
  __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));

  // Load the literal at the ast saved index.
  int literal_offset =
      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
  __ ldr(literal, FieldMemOperand(tmp, literal_offset));

  JumpTarget done;
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(literal, ip);
  // This branch locks the virtual frame at the done label to match the
  // one we have here, where the literal register is not on the stack and
  // nothing is spilled.
  done.Branch(ne);

  // If the entry is undefined we call the runtime system to compute
  // the literal.
  // literal array  (0)
  frame_->EmitPush(tmp);
  // literal index  (1)
  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
  // RegExp pattern (2)
  frame_->EmitPush(Operand(node->pattern()));
  // RegExp flags   (3)
  frame_->EmitPush(Operand(node->flags()));
  frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
  __ Move(literal, r0);

  // This call to bind will get us back to the virtual frame we had before
  // where things are not spilled and the literal register is not on the stack.
  done.Bind();
  // Push the literal.
  frame_->EmitPush(literal);
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ ObjectLiteral");
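  // JavaScript example: 'x = { a: 1, "b": f(), get c() { return 3; } }'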

  Register literal = frame_->GetTOSRegister();
  // Load the function of this activation.
  __ ldr(literal, frame_->Function());
  // Literal array.
  __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
  frame_->EmitPush(literal);
  // Literal index.
  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
  // Constant properties.
  frame_->EmitPush(Operand(node->constant_properties()));
  // Should the object literal have fast elements?
  frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
  if (node->depth() > 1) {
    frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
  } else {
    frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
  }
  frame_->EmitPush(r0);  // save the result
  for (int i = 0; i < node->properties()->length(); i++) {
    // At the start of each iteration, the top of stack contains
    // the newly created object literal.
    ObjectLiteral::Property* property = node->properties()->at(i);
    Literal* key = property->key();
    Expression* value = property->value();
    switch (property->kind()) {
      case ObjectLiteral::Property::CONSTANT:
        break;
      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
        // else fall through
      case ObjectLiteral::Property::COMPUTED:
        if (key->handle()->IsSymbol()) {
          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
          Load(value);
          frame_->PopToR0();
          // Fetch the object literal.
          frame_->SpillAllButCopyTOSToR1();
          __ mov(r2, Operand(key->handle()));
          frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
          break;
        }
        // else fall through
      case ObjectLiteral::Property::PROTOTYPE: {
        frame_->Dup();
        Load(key);
        Load(value);
        frame_->CallRuntime(Runtime::kSetProperty, 3);
        break;
      }
      case ObjectLiteral::Property::SETTER: {
        frame_->Dup();
        Load(key);
        frame_->EmitPush(Operand(Smi::FromInt(1)));
        Load(value);
        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
        break;
      }
      case ObjectLiteral::Property::GETTER: {
        frame_->Dup();
        Load(key);
        frame_->EmitPush(Operand(Smi::FromInt(0)));
        Load(value);
        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
        break;
      }
    }
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ ArrayLiteral");
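  // JavaScript example: 'x = [1, 2, y, f()]'  // Constant elements come
  // from the boilerplate; only 'y' and 'f()' are stored explicitly below.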

  Register tos = frame_->GetTOSRegister();
  // Load the function of this activation.
  __ ldr(tos, frame_->Function());
  // Load the literals array of the function.
  __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
  frame_->EmitPush(tos);
  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
  frame_->EmitPush(Operand(node->constant_elements()));
  int length = node->values()->length();
  if (node->depth() > 1) {
    frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
  } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
    frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
  } else {
    FastCloneShallowArrayStub stub(length);
    frame_->CallStub(&stub, 3);
  }
  frame_->EmitPush(r0);  // save the result
  // r0: created object literal

  // Generate code to set the elements in the array that are not
  // literals.
  for (int i = 0; i < node->values()->length(); i++) {
    Expression* value = node->values()->at(i);

    // If value is a literal the property value is already set in the
    // boilerplate object.
    if (value->AsLiteral() != NULL) continue;
    // If value is a materialized literal the property value is already set
    // in the boilerplate object if it is simple.
    if (CompileTimeValue::IsCompileTimeValue(value)) continue;

    // The property must be set by generated code.
    Load(value);
    frame_->PopToR0();
    // Fetch the object literal.
    frame_->SpillAllButCopyTOSToR1();

    // Get the elements array.
    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));

    // Write to the indexed properties array.
    int offset = i * kPointerSize + FixedArray::kHeaderSize;
    __ str(r0, FieldMemOperand(r1, offset));

    // Update the write barrier for the array address.
    __ RecordWrite(r1, Operand(offset), r3, r2);
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  // Call runtime routine to allocate the catch extension object and
  // assign the exception value to the catch variable.
  Comment cmnt(masm_, "[ CatchExtensionObject");
  Load(node->key());
  Load(node->value());
  frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
  frame_->EmitPush(r0);
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::EmitSlotAssignment(Assignment* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm(), "[ Variable Assignment");
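  // JavaScript example: 'x = y' or compound 'x += y', where 'x' is a
  // non-global variable that has been resolved to a slot.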
  Variable* var = node->target()->AsVariableProxy()->AsVariable();
  ASSERT(var != NULL);
  Slot* slot = var->slot();
  ASSERT(slot != NULL);

  // Evaluate the right-hand side.
  if (node->is_compound()) {
    // For a compound assignment the right-hand side is a binary operation
    // between the current property value and the actual right-hand side.
    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);

    // Perform the binary operation.
    Literal* literal = node->value()->AsLiteral();
    bool overwrite_value =
        (node->value()->AsBinaryOperation() != NULL &&
         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
    if (literal != NULL && literal->handle()->IsSmi()) {
      SmiOperation(node->binary_op(),
                   literal->handle(),
                   false,
                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
    } else {
      GenerateInlineSmi inline_smi =
          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
      if (literal != NULL) {
        ASSERT(!literal->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      Load(node->value());
      GenericBinaryOperation(node->binary_op(),
                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
                             inline_smi);
    }
  } else {
    Load(node->value());
  }

  // Perform the assignment.
  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
    CodeForSourcePosition(node->position());
    StoreToSlot(slot,
                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm(), "[ Named Property Assignment");
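  // JavaScript example: 'obj.x = y' or 'x = y' where 'x' is a global
  // variable (handled as a named property of the global object).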
  Variable* var = node->target()->AsVariableProxy()->AsVariable();
  Property* prop = node->target()->AsProperty();
  ASSERT(var == NULL || (prop == NULL && var->is_global()));

  // Initialize name and evaluate the receiver sub-expression if necessary. If
  // the receiver is trivial it is not placed on the stack at this point, but
  // loaded whenever actually needed.
  Handle<String> name;
  bool is_trivial_receiver = false;
  if (var != NULL) {
    name = var->name();
  } else {
    Literal* lit = prop->key()->AsLiteral();
    ASSERT_NOT_NULL(lit);
    name = Handle<String>::cast(lit->handle());
    // Do not materialize the receiver on the frame if it is trivial.
    is_trivial_receiver = prop->obj()->IsTrivial();
    if (!is_trivial_receiver) Load(prop->obj());
  }

  // Change to slow case in the beginning of an initialization block to
  // avoid the quadratic behavior of repeatedly adding fast properties.
  if (node->starts_initialization_block()) {
    // An initialization block consists of assignments of the form
    // expr.x = ..., so the target is never a variable and there must be
    // a receiver object.
    ASSERT_EQ(NULL, var);
    if (is_trivial_receiver) {
      Load(prop->obj());
    } else {
      frame_->Dup();
    }
    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
  }

  // Change to fast case at the end of an initialization block. To prepare for
  // that add an extra copy of the receiver to the frame, so that it can be
  // converted back to fast case after the assignment.
  if (node->ends_initialization_block() && !is_trivial_receiver) {
    frame_->Dup();
  }

  // Stack layout:
  // [tos]   : receiver (only materialized if non-trivial)
  // [tos+1] : receiver if at the end of an initialization block

  // Evaluate the right-hand side.
  if (node->is_compound()) {
    // For a compound assignment the right-hand side is a binary operation
    // between the current property value and the actual right-hand side.
    if (is_trivial_receiver) {
      Load(prop->obj());
    } else if (var != NULL) {
      LoadGlobal();
    } else {
      frame_->Dup();
    }
    EmitNamedLoad(name, var != NULL);

    // Perform the binary operation.
    Literal* literal = node->value()->AsLiteral();
    bool overwrite_value =
        (node->value()->AsBinaryOperation() != NULL &&
         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
    if (literal != NULL && literal->handle()->IsSmi()) {
      SmiOperation(node->binary_op(),
                   literal->handle(),
                   false,
                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
    } else {
      GenerateInlineSmi inline_smi =
          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
      if (literal != NULL) {
        ASSERT(!literal->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      Load(node->value());
      GenericBinaryOperation(node->binary_op(),
                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
                             inline_smi);
    }
  } else {
    // For non-compound assignment just load the right-hand side.
    Load(node->value());
  }

  // Stack layout:
  // [tos]   : value
  // [tos+1] : receiver (only materialized if non-trivial)
  // [tos+2] : receiver if at the end of an initialization block

  // Perform the assignment.  It is safe to ignore constants here.
  ASSERT(var == NULL || var->mode() != Variable::CONST);
  ASSERT_NE(Token::INIT_CONST, node->op());
  if (is_trivial_receiver) {
    // Load the receiver and swap with the value.
    Load(prop->obj());
    Register t0 = frame_->PopToRegister();
    Register t1 = frame_->PopToRegister(t0);
    frame_->EmitPush(t0);
    frame_->EmitPush(t1);
  }
  CodeForSourcePosition(node->position());
  bool is_contextual = (var != NULL);
  EmitNamedStore(name, is_contextual);
  frame_->EmitPush(r0);

  // Change to fast case at the end of an initialization block.
  if (node->ends_initialization_block()) {
    ASSERT_EQ(NULL, var);
    // The argument to the runtime call is the receiver.
    if (is_trivial_receiver) {
      Load(prop->obj());
    } else {
      // A copy of the receiver is below the value of the assignment. Swap
      // the receiver and the value of the assignment expression.
      Register t0 = frame_->PopToRegister();
      Register t1 = frame_->PopToRegister(t0);
      frame_->EmitPush(t0);
      frame_->EmitPush(t1);
    }
    frame_->CallRuntime(Runtime::kToFastProperties, 1);
  }

  // Stack layout:
  // [tos]   : result

  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Keyed Property Assignment");
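  // JavaScript example: 'obj[key] = y', including rewritten parameter
  // accesses in functions that use 'arguments'.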
  Property* prop = node->target()->AsProperty();
  ASSERT_NOT_NULL(prop);

  // Evaluate the receiver subexpression.
  Load(prop->obj());

  WriteBarrierCharacter wb_info;

  // Change to slow case in the beginning of an initialization block to
  // avoid the quadratic behavior of repeatedly adding fast properties.
  if (node->starts_initialization_block()) {
    frame_->Dup();
    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
  }

  // Change to fast case at the end of an initialization block. To prepare for
  // that add an extra copy of the receiver to the frame, so that it can be
  // converted back to fast case after the assignment.
  if (node->ends_initialization_block()) {
    frame_->Dup();
  }

  // Evaluate the key subexpression.
  Load(prop->key());

  // Stack layout:
  // [tos]   : key
  // [tos+1] : receiver
  // [tos+2] : receiver if at the end of an initialization block
  //
  // Evaluate the right-hand side.
  if (node->is_compound()) {
    // For a compound assignment the right-hand side is a binary operation
    // between the current property value and the actual right-hand side.
    // Duplicate receiver and key for loading the current property value.
    frame_->Dup2();
    EmitKeyedLoad();
    frame_->EmitPush(r0);

    // Perform the binary operation.
    Literal* literal = node->value()->AsLiteral();
    bool overwrite_value =
        (node->value()->AsBinaryOperation() != NULL &&
         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
    if (literal != NULL && literal->handle()->IsSmi()) {
      SmiOperation(node->binary_op(),
                   literal->handle(),
                   false,
                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
    } else {
      GenerateInlineSmi inline_smi =
          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
      if (literal != NULL) {
        ASSERT(!literal->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      Load(node->value());
      GenericBinaryOperation(node->binary_op(),
                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
                             inline_smi);
    }
    wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
  } else {
    // For non-compound assignment just load the right-hand side.
    Load(node->value());
    wb_info = node->value()->AsLiteral() != NULL ?
        NEVER_NEWSPACE :
        (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
  }

  // Stack layout:
  // [tos]   : value
  // [tos+1] : key
  // [tos+2] : receiver
  // [tos+3] : receiver if at the end of an initialization block

  // Perform the assignment.  It is safe to ignore constants here.
  ASSERT(node->op() != Token::INIT_CONST);
  CodeForSourcePosition(node->position());
  EmitKeyedStore(prop->key()->type(), wb_info);
  frame_->EmitPush(r0);

  // Stack layout:
  // [tos]   : result
  // [tos+1] : receiver if at the end of an initialization block

  // Change to fast case at the end of an initialization block.
  if (node->ends_initialization_block()) {
    // The argument to the runtime call is the extra copy of the receiver,
    // which is below the value of the assignment.  Swap the receiver and
    // the value of the assignment expression.
    Register t0 = frame_->PopToRegister();
    Register t1 = frame_->PopToRegister(t0);
    frame_->EmitPush(t1);
    frame_->EmitPush(t0);
    frame_->CallRuntime(Runtime::kToFastProperties, 1);
  }

  // Stack layout:
  // [tos]   : result

  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitAssignment(Assignment* node) {
  VirtualFrame::RegisterAllocationScope scope(this);
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Assignment");

  Variable* var = node->target()->AsVariableProxy()->AsVariable();
  Property* prop = node->target()->AsProperty();

  if (var != NULL && !var->is_global()) {
    EmitSlotAssignment(node);

  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
             (var != NULL && var->is_global())) {
    // Properties whose keys are property names and global variables are
    // treated as named property references.  We do not need to consider
    // global 'this' because it is not a valid left-hand side.
    EmitNamedPropertyAssignment(node);

  } else if (prop != NULL) {
    // Other properties (including rewritten parameters for a function that
    // uses arguments) are keyed property assignments.
    EmitKeyedPropertyAssignment(node);

  } else {
    // Invalid left-hand side.
    Load(node->target());
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
    // The runtime call doesn't actually return, but the code generator
    // still emits code after it and expects a certain frame height.
    frame_->EmitPush(r0);
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitThrow(Throw* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Throw");

  Load(node->exception());
  CodeForSourcePosition(node->position());
  frame_->CallRuntime(Runtime::kThrow, 1);
  frame_->EmitPush(r0);
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitProperty(Property* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Property");

  { Reference property(this, node);
    property.GetValue();
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitCall(Call* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ Call");

  Expression* function = node->expression();
  ZoneList<Expression*>* args = node->arguments();

  // Standard function call.
  // Check if the function is a variable or a property.
  Variable* var = function->AsVariableProxy()->AsVariable();
  Property* property = function->AsProperty();

  // ------------------------------------------------------------------------
  // Fast-case: Use inline caching.
  // ---
  // According to ECMA-262, section 11.2.3, page 44, the function to call
  // must be resolved after the arguments have been evaluated. The IC code
  // automatically handles this by loading the arguments before the function
  // is resolved in cache misses (this also holds for megamorphic calls).
  // ------------------------------------------------------------------------

  if (var != NULL && var->is_possibly_eval()) {
    // ----------------------------------
    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
    // ----------------------------------

    // In a call to eval, we first call %ResolvePossiblyDirectEval to
    // resolve the function we need to call and the receiver of the
    // call.  Then we call the resolved function using the given
    // arguments.

    // Prepare stack for call to resolved function.
    Load(function);

    // Allocate a frame slot for the receiver.
    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);

    // Load the arguments.
    int arg_count = args->length();
    for (int i = 0; i < arg_count; i++) {
      Load(args->at(i));
    }

    VirtualFrame::SpilledScope spilled_scope(frame_);

    // If we know that eval can only be shadowed by eval-introduced
    // variables we attempt to load the global eval function directly
    // in generated code. If we succeed, there is no need to perform a
    // context lookup in the runtime system.
    JumpTarget done;
    if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
      ASSERT(var->slot()->type() == Slot::LOOKUP);
      JumpTarget slow;
      // Prepare the stack for the call to
      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
      // function, the first argument to the eval call and the
      // receiver.
      LoadFromGlobalSlotCheckExtensions(var->slot(),
                                        NOT_INSIDE_TYPEOF,
                                        &slow);
      frame_->EmitPush(r0);
      if (arg_count > 0) {
        __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
        frame_->EmitPush(r1);
      } else {
        frame_->EmitPush(r2);
      }
      __ ldr(r1, frame_->Receiver());
      frame_->EmitPush(r1);

      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);

      done.Jump();
      slow.Bind();
    }

    // Prepare the stack for the call to ResolvePossiblyDirectEval by
    // pushing the loaded function, the first argument to the eval
    // call and the receiver.
    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
    frame_->EmitPush(r1);
    if (arg_count > 0) {
      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
      frame_->EmitPush(r1);
    } else {
      frame_->EmitPush(r2);
    }
    __ ldr(r1, frame_->Receiver());
    frame_->EmitPush(r1);

    // Resolve the call.
    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);

    // If we generated fast-case code bind the jump-target where fast
    // and slow case merge.
    if (done.is_linked()) done.Bind();

    // Touch up stack with the right values for the function and the receiver.
    __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
    __ str(r1, MemOperand(sp, arg_count * kPointerSize));

    // Call the function.
    CodeForSourcePosition(node->position());

    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
    frame_->CallStub(&call_function, arg_count + 1);

    __ ldr(cp, frame_->Context());
    // Remove the function from the stack.
    frame_->Drop();
    frame_->EmitPush(r0);

  } else if (var != NULL && !var->is_this() && var->is_global()) {
    // ----------------------------------
    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
    // ----------------------------------
    // Pass the global object as the receiver and let the IC stub
    // patch the stack to use the global proxy as 'this' in the
    // invoked function.
    LoadGlobal();

    // Load the arguments.
    int arg_count = args->length();
    for (int i = 0; i < arg_count; i++) {
      Load(args->at(i));
    }

    VirtualFrame::SpilledScope spilled_scope(frame_);
    // Setup the name register and call the IC initialization code.
    __ mov(r2, Operand(var->name()));
    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
    CodeForSourcePosition(node->position());
    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
                           arg_count + 1);
    __ ldr(cp, frame_->Context());
    frame_->EmitPush(r0);

  } else if (var != NULL && var->slot() != NULL &&
             var->slot()->type() == Slot::LOOKUP) {
    VirtualFrame::SpilledScope spilled_scope(frame_);
    // ----------------------------------
    // JavaScript examples:
    //
    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
    //
    //  function f() {};
    //  function g() {
    //    eval(...);
    //    f();  // f could be in extension object.
    //  }
    // ----------------------------------

    // JumpTargets do not yet support merging frames so the frame must be
    // spilled when jumping to these targets.
    JumpTarget slow, done;

    // Generate fast case for loading functions from slots that
    // correspond to local/global variables or arguments unless they
    // are shadowed by eval-introduced bindings.
    EmitDynamicLoadFromSlotFastCase(var->slot(),
                                    NOT_INSIDE_TYPEOF,
                                    &slow,
                                    &done);

    slow.Bind();
    // Load the function
    frame_->EmitPush(cp);
    __ mov(r0, Operand(var->name()));
    frame_->EmitPush(r0);
    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
    // r0: slot value; r1: receiver

    // Load the receiver.
    frame_->EmitPush(r0);  // function
    frame_->EmitPush(r1);  // receiver

    // If fast case code has been generated, emit code to push the
    // function and receiver and have the slow path jump around this
    // code.
    if (done.is_linked()) {
      JumpTarget call;
      call.Jump();
      done.Bind();
      frame_->EmitPush(r0);  // function
      LoadGlobalReceiver(r1);  // receiver
      call.Bind();
    }

    // Call the function. At this point, everything is spilled but the
    // function and receiver are in r0 and r1.
    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
    frame_->EmitPush(r0);

  } else if (property != NULL) {
    // Check if the key is a literal string.
    Literal* literal = property->key()->AsLiteral();

    if (literal != NULL && literal->handle()->IsSymbol()) {
      // ------------------------------------------------------------------
      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
      // ------------------------------------------------------------------

      Handle<String> name = Handle<String>::cast(literal->handle());

      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
          name->IsEqualTo(CStrVector("apply")) &&
          args->length() == 2 &&
          args->at(1)->AsVariableProxy() != NULL &&
          args->at(1)->AsVariableProxy()->IsArguments()) {
        // Use the optimized Function.prototype.apply that avoids
        // allocating lazily allocated arguments objects.
        CallApplyLazy(property->obj(),
                      args->at(0),
                      args->at(1)->AsVariableProxy(),
                      node->position());

      } else {
        Load(property->obj());  // Receiver.
        // Load the arguments.
        int arg_count = args->length();
        for (int i = 0; i < arg_count; i++) {
          Load(args->at(i));
        }

        VirtualFrame::SpilledScope spilled_scope(frame_);
        // Set the name register and call the IC initialization code.
        __ mov(r2, Operand(name));
        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
        Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
        CodeForSourcePosition(node->position());
        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
        __ ldr(cp, frame_->Context());
        frame_->EmitPush(r0);
      }

    } else {
      // -------------------------------------------
      // JavaScript example: 'array[index](1, 2, 3)'
      // -------------------------------------------
      VirtualFrame::SpilledScope spilled_scope(frame_);

      Load(property->obj());
      if (property->is_synthetic()) {
        Load(property->key());
        EmitKeyedLoad();
        // Put the function below the receiver.
        // Use the global receiver.
        frame_->EmitPush(r0);  // Function.
        LoadGlobalReceiver(r0);
        // Call the function.
        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
        frame_->EmitPush(r0);
      } else {
        // Load the arguments.
        int arg_count = args->length();
        for (int i = 0; i < arg_count; i++) {
          Load(args->at(i));
        }

        // Set the name register and call the IC initialization code.
        Load(property->key());
        frame_->EmitPop(r2);  // Function name.

        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
        Handle<Code> stub = ComputeKeyedCallInitialize(arg_count, in_loop);
        CodeForSourcePosition(node->position());
        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
        __ ldr(cp, frame_->Context());
        frame_->EmitPush(r0);
      }
    }

  } else {
    // ----------------------------------
    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
    // ----------------------------------

    // Load the function.
    Load(function);

    VirtualFrame::SpilledScope spilled_scope(frame_);

    // Pass the global proxy as the receiver.
    LoadGlobalReceiver(r0);

    // Call the function.
    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
    frame_->EmitPush(r0);
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitCallNew(CallNew* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ CallNew");
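  // JavaScript example: 'new Foo(1, 2, 3)'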

  // According to ECMA-262, section 11.2.2, page 44, the function
  // expression in new calls must be evaluated before the
  // arguments. This is different from ordinary calls, where the
  // actual function to call is resolved after the arguments have been
  // evaluated.

  // Compute function to call and use the global object as the
  // receiver. There is no need to use the global proxy here because
  // it will always be replaced with a newly allocated object.
  Load(node->expression());
  LoadGlobal();

  // Push the arguments ("left-to-right") on the stack.
  ZoneList<Expression*>* args = node->arguments();
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
  }

  VirtualFrame::SpilledScope spilled_scope(frame_);

  // r0: the number of arguments.
  __ mov(r0, Operand(arg_count));
  // Load the function into r1 as per calling convention.
  __ ldr(r1, frame_->ElementAt(arg_count + 1));

  // Call the construct call builtin that handles allocation and
  // constructor invocation.
  CodeForSourcePosition(node->position());
  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
  frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);

  // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
  __ str(r0, frame_->Top());
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  ASSERT(args->length() == 1);
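  // Pushes the class name of the argument: the constructor's instance
  // class name for ordinary JS objects, 'Function' for functions,
  // 'Object' for objects with a non-function constructor, and null for
  // non-JS objects (presumably reached via the %_ClassOf intrinsic).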
  JumpTarget leave, null, function, non_function_constructor;

  // Load the object into r0.
  Load(args->at(0));
  frame_->EmitPop(r0);

  // If the object is a smi, we return null.
  __ tst(r0, Operand(kSmiTagMask));
  null.Branch(eq);

  // Check that the object is a JS object but take special care of JS
  // functions to make sure they have 'Function' as their class.
  __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
  null.Branch(lt);

  // As long as JS_FUNCTION_TYPE is the last instance type and it is
  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
  // LAST_JS_OBJECT_TYPE.
  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
  function.Branch(eq);

  // Check if the constructor in the map is a function.
  __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
  non_function_constructor.Branch(ne);

  // The r0 register now contains the constructor function. Grab the
  // instance class name from there.
  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
  frame_->EmitPush(r0);
  leave.Jump();

  // Functions have class 'Function'.
  function.Bind();
  __ mov(r0, Operand(Factory::function_class_symbol()));
  frame_->EmitPush(r0);
  leave.Jump();

  // Objects with a non-function constructor have class 'Object'.
  non_function_constructor.Bind();
  __ mov(r0, Operand(Factory::Object_symbol()));
  frame_->EmitPush(r0);
  leave.Jump();

  // Non-JS objects have class null.
  null.Bind();
  __ LoadRoot(r0, Heap::kNullValueRootIndex);
  frame_->EmitPush(r0);

  // All done.
  leave.Bind();
}


void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  ASSERT(args->length() == 1);
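  // Unwraps a JSValue wrapper object to its primitive value, e.g.
  // (illustrative) new String("x") yields "x"; non-wrapper values are
  // returned unchanged (see the checks below).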
  JumpTarget leave;
  Load(args->at(0));
  frame_->EmitPop(r0);  // r0 contains object.
  // if (object->IsSmi()) return the object.
  __ tst(r0, Operand(kSmiTagMask));
  leave.Branch(eq);
  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
  __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
  leave.Branch(ne);
  // Load the value.
  __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
  leave.Bind();
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  ASSERT(args->length() == 2);
  JumpTarget leave;
  Load(args->at(0));    // Load the object.
  Load(args->at(1));    // Load the value.
  frame_->EmitPop(r0);  // r0 contains value
  frame_->EmitPop(r1);  // r1 contains object
  // if (object->IsSmi()) return object.
  __ tst(r1, Operand(kSmiTagMask));
  leave.Branch(eq);
  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
  leave.Branch(ne);
  // Store the value.
  __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
  // Update the write barrier.
  __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
  // Leave.
  leave.Bind();
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register reg = frame_->PopToRegister();
  __ tst(reg, Operand(kSmiTagMask));
  cc_reg_ = eq;
}


void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
  // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
  ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (ShouldGenerateLog(args->at(0))) {
    Load(args->at(1));
    Load(args->at(2));
    frame_->CallRuntime(Runtime::kLog, 2);
  }
#endif
  frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
}


void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register reg = frame_->PopToRegister();
  __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
  cc_reg_ = eq;
}


// Generates the Math.pow method.
void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 2);
  Load(args->at(0));
  Load(args->at(1));

  if (!CpuFeatures::IsSupported(VFP3)) {
    frame_->CallRuntime(Runtime::kMath_pow, 2);
    frame_->EmitPush(r0);
  } else {
    CpuFeatures::Scope scope(VFP3);
    JumpTarget runtime, done;
    Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;

    Register scratch1 = VirtualFrame::scratch0();
    Register scratch2 = VirtualFrame::scratch1();

    // Get base and exponent to registers.
    Register exponent = frame_->PopToRegister();
    Register base = frame_->PopToRegister(exponent);
    Register heap_number_map = no_reg;

    // Set the frame for the runtime jump target. The code below jumps to the
    // jump target label so the frame needs to be established before that.
    ASSERT(runtime.entry_frame() == NULL);
    runtime.set_entry_frame(frame_);

    __ BranchOnNotSmi(exponent, &exponent_nonsmi);
    __ BranchOnNotSmi(base, &base_nonsmi);

    heap_number_map = r6;
    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

    // Exponent is a smi and base is a smi. Get the smi value into vfp register
    // d1.
    __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
    __ b(&powi);

    __ bind(&base_nonsmi);
    // Exponent is a smi and base is a non-smi. Get the double value from the
    // base into vfp register d1.
    __ ObjectToDoubleVFPRegister(base, d1,
                                 scratch1, scratch2, heap_number_map, s0,
                                 runtime.entry_label());

    __ bind(&powi);

    // Load 1.0 into d0.
    __ vmov(d0, 1.0);

    // Get the absolute untagged value of the exponent and use that for the
    // calculation.
    __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
    __ rsb(scratch1, scratch1, Operand(0), LeaveCC, mi);  // Negate if negative.
    __ vmov(d2, d0, mi);  // 1.0 needed in d2 later if exponent is negative.

    // Run through all the bits in the exponent. The result is accumulated in
    // d0 while d1 holds base^(2^i) for the current bit i.
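    // Worked example (illustrative): for exponent 5 = 0b101 the loop below
    // multiplies d0 by base and then by base^4, yielding base^5, while d1
    // is squared on every iteration.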
    Label more_bits;
    __ bind(&more_bits);
    __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
    __ vmul(d0, d0, d1, cs);  // Multiply with base^(2^i) if bit i is set.
    __ vmul(d1, d1, d1, ne);  // Don't bother calculating next d1 if done.
    __ b(ne, &more_bits);

    // If exponent is positive we are done.
    __ cmp(exponent, Operand(0));
    __ b(ge, &allocate_return);

    // If the exponent is negative the result is 1/result (d2 already holds
    // 1.0 in that case). However, if d0 has reached infinity this will not
    // give the correct result, so call the runtime if that is the case.
    __ mov(scratch2, Operand(0x7FF00000));
    __ mov(scratch1, Operand(0));
    __ vmov(d1, scratch1, scratch2);  // Load infinity into d1.
    __ vcmp(d0, d1);
    __ vmrs(pc);
    runtime.Branch(eq);  // d0 reached infinity.
    __ vdiv(d0, d2, d0);
    __ b(&allocate_return);

    __ bind(&exponent_nonsmi);
    // Special handling of raising to the power of -0.5 and 0.5. First check
    // that the value is a heap number and that its lower mantissa bits are
    // zero (as they are for both values).
    heap_number_map = r6;
    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
    __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
    __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
    __ cmp(scratch1, heap_number_map);
    runtime.Branch(ne);
    __ tst(scratch2, scratch2);
    runtime.Branch(ne);

    // Load the higher bits (which contain the floating point exponent).
    __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));

    // Compare exponent with -0.5.
    __ cmp(scratch1, Operand(0xbfe00000));
    __ b(ne, &not_minus_half);

    // Get the double value from the base into vfp register d0.
    __ ObjectToDoubleVFPRegister(base, d0,
                                 scratch1, scratch2, heap_number_map, s0,
                                 runtime.entry_label(),
                                 AVOID_NANS_AND_INFINITIES);

    // Load 1.0 into d2.
    __ vmov(d2, 1.0);

    // Calculate the reciprocal of the square root. 1/sqrt(x) = sqrt(1/x).
    __ vdiv(d0, d2, d0);
    __ vsqrt(d0, d0);

    __ b(&allocate_return);

    __ bind(&not_minus_half);
    // Compare exponent with 0.5.
    __ cmp(scratch1, Operand(0x3fe00000));
    runtime.Branch(ne);

    // Get the double value from the base into vfp register d0.
    __ ObjectToDoubleVFPRegister(base, d0,
                                 scratch1, scratch2, heap_number_map, s0,
                                 runtime.entry_label(),
                                 AVOID_NANS_AND_INFINITIES);
    __ vsqrt(d0, d0);

    __ bind(&allocate_return);
    Register scratch3 = r5;
    __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
                                   heap_number_map, runtime.entry_label());
    __ mov(base, scratch3);
    done.Jump();

    runtime.Bind();

    // Push back the arguments again for the runtime call.
    frame_->EmitPush(base);
    frame_->EmitPush(exponent);
    frame_->CallRuntime(Runtime::kMath_pow, 2);
    __ Move(base, r0);

    done.Bind();
    frame_->EmitPush(base);
  }
}


// Generates the Math.sqrt method.
void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));

  if (!CpuFeatures::IsSupported(VFP3)) {
    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
    frame_->EmitPush(r0);
  } else {
    CpuFeatures::Scope scope(VFP3);
    JumpTarget runtime, done;

    Register scratch1 = VirtualFrame::scratch0();
    Register scratch2 = VirtualFrame::scratch1();

    // Get the value from the frame.
    Register tos = frame_->PopToRegister();

    // Set the frame for the runtime jump target. The code below jumps to the
    // jump target label so the frame needs to be established before that.
    ASSERT(runtime.entry_frame() == NULL);
    runtime.set_entry_frame(frame_);

    Register heap_number_map = r6;
    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

    // Get the double value from the heap number into vfp register d0.
    __ ObjectToDoubleVFPRegister(tos, d0,
                                 scratch1, scratch2, heap_number_map, s0,
                                 runtime.entry_label());

    // Calculate the square root of d0 and place result in a heap number object.
    __ vsqrt(d0, d0);
    __ AllocateHeapNumberWithValue(
        tos, d0, scratch1, scratch2, heap_number_map, runtime.entry_label());
    done.Jump();

    runtime.Bind();
    // Push back the argument again for the runtime call.
    frame_->EmitPush(tos);
    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
    __ Move(tos, r0);

    done.Bind();
    frame_->EmitPush(tos);
  }
}


class DeferredStringCharCodeAt : public DeferredCode {
 public:
  DeferredStringCharCodeAt(Register object,
                           Register index,
                           Register scratch,
                           Register result)
      : result_(result),
        char_code_at_generator_(object,
                                index,
                                scratch,
                                result,
                                &need_conversion_,
                                &need_conversion_,
                                &index_out_of_range_,
                                STRING_INDEX_IS_NUMBER) {}

  StringCharCodeAtGenerator* fast_case_generator() {
    return &char_code_at_generator_;
  }

  virtual void Generate() {
    VirtualFrameRuntimeCallHelper call_helper(frame_state());
    char_code_at_generator_.GenerateSlow(masm(), call_helper);

    __ bind(&need_conversion_);
    // Move the undefined value into the result register, which will
    // trigger conversion.
    __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
    __ jmp(exit_label());

    __ bind(&index_out_of_range_);
    // When the index is out of range, the spec requires us to return
    // NaN.
    __ LoadRoot(result_, Heap::kNanValueRootIndex);
    __ jmp(exit_label());
  }

 private:
  Register result_;

  Label need_conversion_;
  Label index_out_of_range_;

  StringCharCodeAtGenerator char_code_at_generator_;
};


// This generates code that performs a String.prototype.charCodeAt() call
// or returns a smi in order to trigger conversion.
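// JavaScript example: '"abc".charCodeAt(1)' yields 98.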
4547void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
4548  VirtualFrame::SpilledScope spilled_scope(frame_);
4549  Comment(masm_, "[ GenerateStringCharCodeAt");
4550  ASSERT(args->length() == 2);
4551
4552  Load(args->at(0));
4553  Load(args->at(1));
4554
4555  Register index = r1;
4556  Register object = r2;
4557
4558  frame_->EmitPop(r1);
4559  frame_->EmitPop(r2);
4560
4561  // We need two extra registers.
4562  Register scratch = r3;
4563  Register result = r0;
4564
4565  DeferredStringCharCodeAt* deferred =
4566      new DeferredStringCharCodeAt(object,
4567                                   index,
4568                                   scratch,
4569                                   result);
4570  deferred->fast_case_generator()->GenerateFast(masm_);
4571  deferred->BindExit();
4572  frame_->EmitPush(result);
4573}
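
// Illustration (added note, not in the original source): the fast/deferred
// split above implements the observable JS behaviour, e.g.
//
//   'abc'.charCodeAt(1)    // 98, handled by the inlined fast case
//   'abc'.charCodeAt(42)   // NaN, via the index_out_of_range_ label
//   'abc'.charCodeAt('1')  // non-smi index falls back to the slow case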


class DeferredStringCharFromCode : public DeferredCode {
 public:
  DeferredStringCharFromCode(Register code,
                             Register result)
      : char_from_code_generator_(code, result) {}

  StringCharFromCodeGenerator* fast_case_generator() {
    return &char_from_code_generator_;
  }

  virtual void Generate() {
    VirtualFrameRuntimeCallHelper call_helper(frame_state());
    char_from_code_generator_.GenerateSlow(masm(), call_helper);
  }

 private:
  StringCharFromCodeGenerator char_from_code_generator_;
};


// Generates code for creating a one-char string from a char code.
void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Comment(masm_, "[ GenerateStringCharFromCode");
  ASSERT(args->length() == 1);

  Load(args->at(0));

  Register code = r1;
  Register result = r0;

  frame_->EmitPop(code);

  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
      code, result);
  deferred->fast_case_generator()->GenerateFast(masm_);
  deferred->BindExit();
  frame_->EmitPush(result);
}


class DeferredStringCharAt : public DeferredCode {
 public:
  DeferredStringCharAt(Register object,
                       Register index,
                       Register scratch1,
                       Register scratch2,
                       Register result)
      : result_(result),
        char_at_generator_(object,
                           index,
                           scratch1,
                           scratch2,
                           result,
                           &need_conversion_,
                           &need_conversion_,
                           &index_out_of_range_,
                           STRING_INDEX_IS_NUMBER) {}

  StringCharAtGenerator* fast_case_generator() {
    return &char_at_generator_;
  }

  virtual void Generate() {
    VirtualFrameRuntimeCallHelper call_helper(frame_state());
    char_at_generator_.GenerateSlow(masm(), call_helper);

    __ bind(&need_conversion_);
    // Move smi zero into the result register, which will trigger
    // conversion.
    __ mov(result_, Operand(Smi::FromInt(0)));
    __ jmp(exit_label());

    __ bind(&index_out_of_range_);
    // When the index is out of range, the spec requires us to return
    // the empty string.
    __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
    __ jmp(exit_label());
  }

 private:
  Register result_;

  Label need_conversion_;
  Label index_out_of_range_;

  StringCharAtGenerator char_at_generator_;
};


// This generates code that performs a String.prototype.charAt() call
// or returns a smi in order to trigger conversion.
void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Comment(masm_, "[ GenerateStringCharAt");
  ASSERT(args->length() == 2);

  Load(args->at(0));
  Load(args->at(1));

  Register index = r1;
  Register object = r2;

  frame_->EmitPop(index);
  frame_->EmitPop(object);

  // We need three extra registers.
  Register scratch1 = r3;
  Register scratch2 = r4;
  Register result = r0;

  DeferredStringCharAt* deferred =
      new DeferredStringCharAt(object,
                               index,
                               scratch1,
                               scratch2,
                               result);
  deferred->fast_case_generator()->GenerateFast(masm_);
  deferred->BindExit();
  frame_->EmitPush(result);
}


void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  JumpTarget answer;
  // We need the CC bits to come out as not_equal in the case where the
  // object is a smi.  This can't be done with the usual test opcode so
  // we use XOR to get the right CC bits.
  Register possible_array = frame_->PopToRegister();
  Register scratch = VirtualFrame::scratch0();
  __ and_(scratch, possible_array, Operand(kSmiTagMask));
  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
  answer.Branch(ne);
  // It is a heap object - get the map. Check if the object is a JS array.
  __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
  answer.Bind();
  cc_reg_ = eq;
}
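
// Worked example of the AND/EOR smi check above (added note, not in the
// original source), assuming kSmiTagMask == 1 and a zero smi tag:
//
//   smi:         low bit 0 -> AND gives 0 -> EOR 1 gives 1 -> flags ne
//   heap object: low bit 1 -> AND gives 1 -> EOR 1 gives 0 -> flags eq
//
// So for a smi the branch on ne skips the map check and reaches answer.Bind()
// with ne still set, which the final cc_reg_ = eq reports as false.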


void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  JumpTarget answer;
  // We need the CC bits to come out as not_equal in the case where the
  // object is a smi.  This can't be done with the usual test opcode so
  // we use XOR to get the right CC bits.
  Register possible_regexp = frame_->PopToRegister();
  Register scratch = VirtualFrame::scratch0();
  __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
  answer.Branch(ne);
  // It is a heap object - get the map. Check if the object is a regexp.
  __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
  answer.Bind();
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
  // This generates a fast version of:
  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register possible_object = frame_->PopToRegister();
  __ tst(possible_object, Operand(kSmiTagMask));
  false_target()->Branch(eq);

  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(possible_object, ip);
  true_target()->Branch(eq);

  Register map_reg = VirtualFrame::scratch0();
  __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined when tested with typeof.
  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
  __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
  false_target()->Branch(ne);

  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
  __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
  false_target()->Branch(lt);
  __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
  cc_reg_ = le;
}


void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
  // This generates a fast version of:
  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
  // typeof(arg) === 'function').
  // It includes undetectable objects (as opposed to IsObject).
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register value = frame_->PopToRegister();
  __ tst(value, Operand(kSmiTagMask));
  false_target()->Branch(eq);
  // Check that this is an object.
  __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
  __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
  __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
  cc_reg_ = ge;
}


void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
  // This generates a fast version of:
  // (%_ClassOf(arg) === 'Function')
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register possible_function = frame_->PopToRegister();
  __ tst(possible_function, Operand(kSmiTagMask));
  false_target()->Branch(eq);
  Register map_reg = VirtualFrame::scratch0();
  Register scratch = VirtualFrame::scratch1();
  __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register possible_undetectable = frame_->PopToRegister();
  __ tst(possible_undetectable, Operand(kSmiTagMask));
  false_target()->Branch(eq);
  Register scratch = VirtualFrame::scratch0();
  __ ldr(scratch,
         FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
  cc_reg_ = ne;
}


void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 0);

  Register scratch0 = VirtualFrame::scratch0();
  Register scratch1 = VirtualFrame::scratch1();
  // Get the frame pointer for the calling frame.
  __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  __ ldr(scratch1,
         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ldr(scratch0,
         MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);

  // Check the marker in the calling frame.
  __ ldr(scratch1,
         MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
  cc_reg_ = eq;
}


void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 0);

  Register tos = frame_->GetTOSRegister();
  Register scratch0 = VirtualFrame::scratch0();
  Register scratch1 = VirtualFrame::scratch1();

  // Check if the calling frame is an arguments adaptor frame.
  __ ldr(scratch0,
         MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(scratch1,
         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Get the number of formal parameters.
  __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);

  // Arguments adaptor case: Read the arguments length from the
  // adaptor frame.
  __ ldr(tos,
         MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
         eq);

  frame_->EmitPush(tos);
}
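
// Added note (not in the original source): the two functions above lean on
// ARM conditional execution instead of branches.  The cmp against the
// (smi-tagged) ARGUMENTS_ADAPTOR marker sets the flags once, and the
// following ldr/mov each carry an eq/ne predicate, e.g. roughly:
//
//   cmp   scratch1, #ARGUMENTS_ADAPTOR     ; one flag-setting compare
//   movne tos, #num_parameters             ; only if ordinary frame
//   ldreq tos, [scratch0, #kLengthOffset]  ; only if adaptor frame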


void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  ASSERT(args->length() == 1);

  // Satisfy contract with ArgumentsAccessStub:
  // Load the key into r1 and the formal parameters count into r0.
  Load(args->at(0));
  frame_->EmitPop(r1);
  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));

  // Call the shared stub to get to arguments[key].
  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
  frame_->CallStub(&stub, 0);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRandomHeapNumber(
    ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  ASSERT(args->length() == 0);

  Label slow_allocate_heapnumber;
  Label heapnumber_allocated;

  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
  __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
  __ jmp(&heapnumber_allocated);

  __ bind(&slow_allocate_heapnumber);
  // Allocate a heap number.
  __ CallRuntime(Runtime::kNumberAlloc, 0);
  __ mov(r4, Operand(r0));

  __ bind(&heapnumber_allocated);

  // Convert 32 random bits in r0 to 0.(32 random bits) in a double
  // by computing:
  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
  if (CpuFeatures::IsSupported(VFP3)) {
    __ PrepareCallCFunction(0, r1);
    __ CallCFunction(ExternalReference::random_uint32_function(), 0);

    CpuFeatures::Scope scope(VFP3);
    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
    // Create this constant using mov/orr to avoid PC relative load.
    __ mov(r1, Operand(0x41000000));
    __ orr(r1, r1, Operand(0x300000));
    // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
    __ vmov(d7, r0, r1);
    // Move 0x4130000000000000 to VFP.
    __ mov(r0, Operand(0));
    __ vmov(d8, r0, r1);
    // Subtract and store the result in the heap number.
    __ vsub(d7, d7, d8);
    __ sub(r0, r4, Operand(kHeapObjectTag));
    __ vstr(d7, r0, HeapNumber::kValueOffset);
    frame_->EmitPush(r4);
  } else {
    __ mov(r0, Operand(r4));
    __ PrepareCallCFunction(1, r1);
    __ CallCFunction(
        ExternalReference::fill_heap_number_with_random_function(), 1);
    frame_->EmitPush(r0);
  }
}
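
// Worked example of the bit trick above (added note, not in the original
// source).  A double 0x41300000'xxxxxxxx has exponent 0x413 = 1043, i.e.
// 2^(1043 - 1023) = 2^20, and the 32 random bits sit in the low mantissa:
//
//   (1 + random / 2^52) * 2^20 - 1.0 * 2^20  =  random * 2^-32
//
// which is a uniform value in [0, 1).  E.g. random = 0x80000000 yields
// exactly 0.5.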


void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
  ASSERT_EQ(2, args->length());

  Load(args->at(0));
  Load(args->at(1));

  StringAddStub stub(NO_STRING_ADD_FLAGS);
  frame_->SpillAll();
  frame_->CallStub(&stub, 2);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
  ASSERT_EQ(3, args->length());

  Load(args->at(0));
  Load(args->at(1));
  Load(args->at(2));

  SubStringStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 3);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
  ASSERT_EQ(2, args->length());

  Load(args->at(0));
  Load(args->at(1));

  StringCompareStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 2);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
  ASSERT_EQ(4, args->length());

  Load(args->at(0));
  Load(args->at(1));
  Load(args->at(2));
  Load(args->at(3));
  RegExpExecStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 4);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
  // No stub. This code only occurs a few times in regexp.js.
  const int kMaxInlineLength = 100;
  ASSERT_EQ(3, args->length());
  Load(args->at(0));  // Size of array, smi.
  Load(args->at(1));  // "index" property value.
  Load(args->at(2));  // "input" property value.
  {
    VirtualFrame::SpilledScope spilled_scope(frame_);
    Label slowcase;
    Label done;
    __ ldr(r1, MemOperand(sp, kPointerSize * 2));
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize == 1);
    __ tst(r1, Operand(kSmiTagMask));
    __ b(ne, &slowcase);
    __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
    __ b(hi, &slowcase);
    // Smi-tagging is equivalent to multiplying by 2.
    // Allocate RegExpResult followed by FixedArray with total size in r2.
    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
    // Elements:  [Map][Length][..elements..]
    // Size of JSArray with two in-object properties and the header of a
    // FixedArray.
    int objects_size =
        (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
    __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
    __ add(r2, r5, Operand(objects_size));
    __ AllocateInNewSpace(
        r2,  // In: Size, in words.
        r0,  // Out: Start of allocation (tagged).
        r3,  // Scratch register.
        r4,  // Scratch register.
        &slowcase,
        static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
    // r0: Start of allocated area, object-tagged.
    // r1: Number of elements in array, as smi.
    // r5: Number of elements, untagged.

    // Set JSArray map to global.regexp_result_map().
    // Set empty properties FixedArray.
    // Set elements to point to FixedArray allocated right after the JSArray.
    // Interleave operations for better latency.
    __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
    __ add(r3, r0, Operand(JSRegExpResult::kSize));
    __ mov(r4, Operand(Factory::empty_fixed_array()));
    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
    __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
    __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
    __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
    __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

    // Set input, index and length fields from arguments.
    __ ldm(ia_w, sp, static_cast<RegList>(r2.bit() | r4.bit()));
    __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
    __ add(sp, sp, Operand(kPointerSize));
    __ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
    __ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset));

    // Fill out the elements FixedArray.
    // r0: JSArray, tagged.
    // r3: FixedArray, tagged.
    // r5: Number of elements in array, untagged.

    // Set map.
    __ mov(r2, Operand(Factory::fixed_array_map()));
    __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
    // Set FixedArray length.
    __ mov(r6, Operand(r5, LSL, kSmiTagSize));
    __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
    // Fill contents of fixed-array with the-hole.
    __ mov(r2, Operand(Factory::the_hole_value()));
    __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    // Fill fixed array elements with hole.
    // r0: JSArray, tagged.
    // r2: the hole.
    // r3: Start of elements in FixedArray.
    // r5: Number of elements to fill.
    Label loop;
    __ tst(r5, Operand(r5));
    __ bind(&loop);
    __ b(le, &done);  // Jump if r5 is negative or zero.
    __ sub(r5, r5, Operand(1), SetCC);
    __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
    __ jmp(&loop);

    __ bind(&slowcase);
    __ CallRuntime(Runtime::kRegExpConstructResult, 3);

    __ bind(&done);
  }
  frame_->Forget(3);
  frame_->EmitPush(r0);
}
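
// Size arithmetic for the inline allocation above (added note, not in the
// original source).  Assuming, hypothetically, a 4-byte pointer size with
// JSRegExpResult::kSize covering six words (four JSArray header words plus
// the two in-object properties) and a two-word FixedArray header, a match
// result of length n needs roughly
//
//   objects_size = 6 + 2 = 8 words, total = n + 8 words,
//
// which is what ends up in r2 before AllocateInNewSpace.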


class DeferredSearchCache: public DeferredCode {
 public:
  DeferredSearchCache(Register dst, Register cache, Register key)
      : dst_(dst), cache_(cache), key_(key) {
    set_comment("[ DeferredSearchCache");
  }

  virtual void Generate();

 private:
  Register dst_, cache_, key_;
};


void DeferredSearchCache::Generate() {
  __ Push(cache_, key_);
  __ CallRuntime(Runtime::kGetFromCache, 2);
  if (!dst_.is(r0)) {
    __ mov(dst_, r0);
  }
}


void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
  ASSERT_EQ(2, args->length());

  ASSERT_NE(NULL, args->at(0)->AsLiteral());
  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();

  Handle<FixedArray> jsfunction_result_caches(
      Top::global_context()->jsfunction_result_caches());
  if (jsfunction_result_caches->length() <= cache_id) {
    __ Abort("Attempt to use undefined cache.");
    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
    return;
  }

  Load(args->at(1));

  VirtualFrame::SpilledScope spilled_scope(frame_);

  frame_->EmitPop(r2);

  __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
  __ ldr(r1, ContextOperand(r1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
  __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(cache_id)));

  DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);

  const int kFingerOffset =
      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
  // r0 now holds finger offset as a smi.
  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // r3 now points to the start of fixed array elements.
  __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
  // Note side effect of PreIndex: r3 now points to the key of the pair.
  __ cmp(r2, r0);
  deferred->Branch(ne);

  __ ldr(r0, MemOperand(r3, kPointerSize));

  deferred->BindExit();
  frame_->EmitPush(r0);
}
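
// Layout assumed by the finger lookup above (added note, not in the original
// source): a JSFunctionResultCache is a FixedArray holding a "finger" index
// followed by key/value pairs, roughly
//
//   [ ...header... | finger | ... | key_i | value_i | ... ]
//
// The inline fast path only probes the pair at the finger; any miss falls
// through to DeferredSearchCache, i.e. the %GetFromCache runtime call.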


void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);

  // Load the argument on the stack and jump to the runtime.
  Load(args->at(0));

  NumberToStringStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 1);
  frame_->EmitPush(r0);
}


class DeferredSwapElements: public DeferredCode {
 public:
  DeferredSwapElements(Register object, Register index1, Register index2)
      : object_(object), index1_(index1), index2_(index2) {
    set_comment("[ DeferredSwapElements");
  }

  virtual void Generate();

 private:
  Register object_, index1_, index2_;
};


void DeferredSwapElements::Generate() {
  __ push(object_);
  __ push(index1_);
  __ push(index2_);
  __ CallRuntime(Runtime::kSwapElements, 3);
}


void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
  Comment cmnt(masm_, "[ GenerateSwapElements");

  ASSERT_EQ(3, args->length());

  Load(args->at(0));
  Load(args->at(1));
  Load(args->at(2));

  VirtualFrame::SpilledScope spilled_scope(frame_);

  Register index2 = r2;
  Register index1 = r1;
  Register object = r0;
  Register tmp1 = r3;
  Register tmp2 = r4;

  frame_->EmitPop(index2);
  frame_->EmitPop(index1);
  frame_->EmitPop(object);

  DeferredSwapElements* deferred =
      new DeferredSwapElements(object, index1, index2);

  // Fetch the map and check if array is in fast case.
  // Check that object doesn't require security checks and
  // has no indexed interceptor.
  __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
  deferred->Branch(lt);
  __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
  __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
  deferred->Branch(nz);

  // Check the object's elements are in fast case.
  __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(tmp2, ip);
  deferred->Branch(ne);

  // Smi-tagging is equivalent to multiplying by 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);

  // Check that both indices are smis.
  __ mov(tmp2, index1);
  __ orr(tmp2, tmp2, index2);
  __ tst(tmp2, Operand(kSmiTagMask));
  deferred->Branch(nz);

  // Bring the offsets into the fixed array in tmp1 into index1 and
  // index2.
  __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));

  // Swap elements.
  Register tmp3 = object;
  object = no_reg;
  __ ldr(tmp3, MemOperand(tmp1, index1));
  __ ldr(tmp2, MemOperand(tmp1, index2));
  __ str(tmp3, MemOperand(tmp1, index2));
  __ str(tmp2, MemOperand(tmp1, index1));

  Label done;
  __ InNewSpace(tmp1, tmp2, eq, &done);
  // Possible optimization: do a check that both values are smis
  // (OR them together and test against the smi mask).

  __ mov(tmp2, tmp1);
  RecordWriteStub recordWrite1(tmp1, index1, tmp3);
  __ CallStub(&recordWrite1);

  RecordWriteStub recordWrite2(tmp2, index2, tmp3);
  __ CallStub(&recordWrite2);

  __ bind(&done);

  deferred->BindExit();
  __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
  frame_->EmitPush(tmp1);
}
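
// Added note (not in the original source) on the write barriers above: after
// storing into a FixedArray that may live outside new space, the collector
// needs remembered-set updates for any pointers into new space, e.g.
//
//   old_array.elements[i] = young_object;   // requires a record-write
//
// The InNewSpace(eq, &done) check skips both RecordWriteStub calls when the
// elements array itself is in new space, where no barrier is needed.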


void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
  Comment cmnt(masm_, "[ GenerateCallFunction");

  ASSERT(args->length() >= 2);

  int n_args = args->length() - 2;  // for receiver and function.
  Load(args->at(0));  // receiver
  for (int i = 0; i < n_args; i++) {
    Load(args->at(i + 1));
  }
  Load(args->at(n_args + 1));  // function
  frame_->CallJSFunction(n_args);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);
  Load(args->at(0));
  if (CpuFeatures::IsSupported(VFP3)) {
    TranscendentalCacheStub stub(TranscendentalCache::SIN);
    frame_->SpillAllButCopyTOSToR0();
    frame_->CallStub(&stub, 1);
  } else {
    frame_->CallRuntime(Runtime::kMath_sin, 1);
  }
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);
  Load(args->at(0));
  if (CpuFeatures::IsSupported(VFP3)) {
    TranscendentalCacheStub stub(TranscendentalCache::COS);
    frame_->SpillAllButCopyTOSToR0();
    frame_->CallStub(&stub, 1);
  } else {
    frame_->CallRuntime(Runtime::kMath_cos, 1);
  }
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 2);

  // Load the two objects into registers and perform the comparison.
  Load(args->at(0));
  Load(args->at(1));
  Register lhs = frame_->PopToRegister();
  Register rhs = frame_->PopToRegister(lhs);
  __ cmp(lhs, rhs);
  cc_reg_ = eq;
}


void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  if (CheckForInlineRuntimeCall(node)) {
    ASSERT((has_cc() && frame_->height() == original_height) ||
           (!has_cc() && frame_->height() == original_height + 1));
    return;
  }

  ZoneList<Expression*>* args = node->arguments();
  Comment cmnt(masm_, "[ CallRuntime");
  Runtime::Function* function = node->function();

  if (function == NULL) {
    // Prepare stack for calling JS runtime function.
    // Push the builtins object found in the current global object.
    Register scratch = VirtualFrame::scratch0();
    __ ldr(scratch, GlobalObject());
    Register builtins = frame_->GetTOSRegister();
    __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
    frame_->EmitPush(builtins);
  }

  // Push the arguments ("left-to-right").
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
  }

  VirtualFrame::SpilledScope spilled_scope(frame_);

  if (function == NULL) {
    // Call the JS runtime function.
    __ mov(r2, Operand(node->name()));
    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
    __ ldr(cp, frame_->Context());
    frame_->EmitPush(r0);
  } else {
    // Call the C runtime function.
    frame_->CallRuntime(function, arg_count);
    frame_->EmitPush(r0);
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ UnaryOperation");

  Token::Value op = node->op();

  if (op == Token::NOT) {
    LoadCondition(node->expression(), false_target(), true_target(), true);
    // LoadCondition may (and usually does) leave a test and branch to
    // be emitted by the caller.  In that case, negate the condition.
    if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);

  } else if (op == Token::DELETE) {
    Property* property = node->expression()->AsProperty();
    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
    if (property != NULL) {
      Load(property->obj());
      Load(property->key());
      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
      frame_->EmitPush(r0);

    } else if (variable != NULL) {
      Slot* slot = variable->slot();
      if (variable->is_global()) {
        LoadGlobal();
        frame_->EmitPush(Operand(variable->name()));
        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
        frame_->EmitPush(r0);

      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
        // Look up the context holding the named variable.
        frame_->EmitPush(cp);
        frame_->EmitPush(Operand(variable->name()));
        frame_->CallRuntime(Runtime::kLookupContext, 2);
        // r0: context
        frame_->EmitPush(r0);
        frame_->EmitPush(Operand(variable->name()));
        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
        frame_->EmitPush(r0);

      } else {
        // Default: the result of deleting a non-global, non-dynamically
        // introduced variable is false.
        frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
      }

    } else {
      // Default: the result of deleting other expressions is true.
      Load(node->expression());  // may have side-effects
      frame_->Drop();
      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
    }

  } else if (op == Token::TYPEOF) {
    // Special case for loading the typeof expression; see comment on
    // LoadTypeofExpression().
    LoadTypeofExpression(node->expression());
    frame_->CallRuntime(Runtime::kTypeof, 1);
    frame_->EmitPush(r0);  // r0 has result

  } else {
    bool can_overwrite =
        (node->expression()->AsBinaryOperation() != NULL &&
         node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
    UnaryOverwriteMode overwrite =
        can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;

    bool no_negative_zero = node->expression()->no_negative_zero();
    Load(node->expression());
    switch (op) {
      case Token::NOT:
      case Token::DELETE:
      case Token::TYPEOF:
        UNREACHABLE();  // handled above
        break;

      case Token::SUB: {
        frame_->PopToR0();
        GenericUnaryOpStub stub(
            Token::SUB,
            overwrite,
            no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
        frame_->CallStub(&stub, 0);
        frame_->EmitPush(r0);  // r0 has result
        break;
      }

      case Token::BIT_NOT: {
        Register tos = frame_->PopToRegister();
        JumpTarget not_smi_label;
        JumpTarget continue_label;
        // Smi check.
        __ tst(tos, Operand(kSmiTagMask));
        not_smi_label.Branch(ne);

        __ mvn(tos, Operand(tos));
        __ bic(tos, tos, Operand(kSmiTagMask));  // Bit-clear inverted smi-tag.
        frame_->EmitPush(tos);
        // The fast case is the first to jump to the continue label, so it gets
        // to decide the virtual frame layout.
        continue_label.Jump();

        not_smi_label.Bind();
        frame_->SpillAll();
        __ Move(r0, tos);
        GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
        frame_->CallStub(&stub, 0);
        frame_->EmitPush(r0);

        continue_label.Bind();
        break;
      }

      case Token::VOID:
        frame_->Drop();
        frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
        break;

      case Token::ADD: {
        Register tos = frame_->Peek();
        // Smi check.
        JumpTarget continue_label;
        __ tst(tos, Operand(kSmiTagMask));
        continue_label.Branch(eq);

        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
        frame_->EmitPush(r0);

        continue_label.Bind();
        break;
      }
      default:
        UNREACHABLE();
    }
  }
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}
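
// Worked example for the BIT_NOT fast path above (added note, not in the
// original source), assuming 32-bit smis tagged with a low 0 bit:
//
//   smi 5   = ...0000101 0          (value << 1)
//   mvn     = ...1111010 1          (bitwise NOT, tag bit now 1)
//   bic tag = ...1111010 0          = smi -6
//
// and indeed ~5 == -6, so the tagged result needs no retagging beyond
// clearing the inverted tag bit.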


void CodeGenerator::VisitCountOperation(CountOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ CountOperation");
  VirtualFrame::RegisterAllocationScope scope(this);

  bool is_postfix = node->is_postfix();
  bool is_increment = node->op() == Token::INC;

  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
  bool is_const = (var != NULL && var->mode() == Variable::CONST);
  bool is_slot = (var != NULL && var->mode() == Variable::VAR);

  if (!is_const && is_slot && type_info(var->slot()).IsSmi()) {
    // The type info declares that this variable is always a Smi.  That
    // means it is a Smi both before and after the increment/decrement.
    // Let's make use of that to make a very minimal count.
    Reference target(this, node->expression(), !is_const);
    ASSERT(!target.is_illegal());
    target.GetValue();  // Pushes the value.
    Register value = frame_->PopToRegister();
    if (is_postfix) frame_->EmitPush(value);
    if (is_increment) {
      __ add(value, value, Operand(Smi::FromInt(1)));
    } else {
      __ sub(value, value, Operand(Smi::FromInt(1)));
    }
    frame_->EmitPush(value);
    target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
    if (is_postfix) frame_->Pop();
    ASSERT_EQ(original_height + 1, frame_->height());
    return;
  }

  // If it's a postfix expression and its result is not ignored and the
  // reference is non-trivial, then push a placeholder on the stack now
  // to hold the result of the expression.
  bool placeholder_pushed = false;
  if (!is_slot && is_postfix) {
    frame_->EmitPush(Operand(Smi::FromInt(0)));
    placeholder_pushed = true;
  }

  // A constant reference is not saved to, so a constant reference is not a
  // compound assignment reference.
  { Reference target(this, node->expression(), !is_const);
    if (target.is_illegal()) {
      // Spoof the virtual frame to have the expected height (one higher
      // than on entry).
      if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
      ASSERT_EQ(original_height + 1, frame_->height());
      return;
    }

    // This pushes 0, 1 or 2 words onto the stack to be used later when
    // updating the target.  It also pushes the current value of the target.
    target.GetValue();

    JumpTarget slow;
    JumpTarget exit;

    Register value = frame_->PopToRegister();

    // Postfix: Store the old value as the result.
    if (placeholder_pushed) {
      frame_->SetElementAt(value, target.size());
    } else if (is_postfix) {
      frame_->EmitPush(value);
      __ mov(VirtualFrame::scratch0(), value);
      value = VirtualFrame::scratch0();
    }

    // Check for smi operand.
    __ tst(value, Operand(kSmiTagMask));
    slow.Branch(ne);

    // Perform optimistic increment/decrement.
    if (is_increment) {
      __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
    } else {
      __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
    }

    // If the increment/decrement didn't overflow, we're done.
    exit.Branch(vc);

    // Revert optimistic increment/decrement.
    if (is_increment) {
      __ sub(value, value, Operand(Smi::FromInt(1)));
    } else {
      __ add(value, value, Operand(Smi::FromInt(1)));
    }

    // Slow case: Convert to number.  At this point the
    // value to be incremented is in the value register.
    slow.Bind();

    // Convert the operand to a number.
    frame_->EmitPush(value);

    {
      VirtualFrame::SpilledScope spilled(frame_);
      frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);

      if (is_postfix) {
        // Postfix: store to result (on the stack).
        __ str(r0, frame_->ElementAt(target.size()));
      }

      // Compute the new value.
      frame_->EmitPush(r0);
      frame_->EmitPush(Operand(Smi::FromInt(1)));
      if (is_increment) {
        frame_->CallRuntime(Runtime::kNumberAdd, 2);
      } else {
        frame_->CallRuntime(Runtime::kNumberSub, 2);
      }
    }

    __ Move(value, r0);
    // Store the new value in the target if not const.
    // At this point the answer is in the value register.
    exit.Bind();
    frame_->EmitPush(value);
    // Set the target with the result, leaving the result on
    // top of the stack.  Removes the target from the stack if
    // it has a non-zero size.
    if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
  }

  // Postfix: Discard the new value and use the old.
  if (is_postfix) frame_->Pop();
  ASSERT_EQ(original_height + 1, frame_->height());
}
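
// Worked example of the optimistic smi increment above (added note, not in
// the original source).  With smis tagged as value << 1, adding the tagged
// constant Smi::FromInt(1) (i.e. 2) increments the untagged value directly:
//
//   smi 7 + Smi::FromInt(1)  ->  14 + 2 = 16  =  smi 8
//
// SetCC makes the add set the overflow flag, so exit.Branch(vc) keeps the
// fast result unless the smi range overflowed, in which case the add is
// reverted and the generic TO_NUMBER/NumberAdd path runs.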


void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
  // According to ECMA-262 section 11.11, page 58, the binary logical
  // operators must yield the result of one of the two expressions
  // before any ToBoolean() conversions. This means that the value
  // produced by a && or || operator is not necessarily a boolean.

  // NOTE: If the left hand side produces a materialized value (not in
  // the CC register), we force the right hand side to do the
  // same. This is necessary because we may have to branch to the exit
  // after evaluating the left hand side (due to the shortcut
  // semantics), but the compiler must (statically) know if the result
  // of compiling the binary operation is materialized or not.
  if (node->op() == Token::AND) {
    JumpTarget is_true;
    LoadCondition(node->left(), &is_true, false_target(), false);
    if (has_valid_frame() && !has_cc()) {
      // The left-hand side result is on top of the virtual frame.
      JumpTarget pop_and_continue;
      JumpTarget exit;

      frame_->Dup();
      // Avoid popping the result if it converts to 'false' using the
      // standard ToBoolean() conversion as described in ECMA-262,
      // section 9.2, page 30.
      ToBoolean(&pop_and_continue, &exit);
      Branch(false, &exit);

      // Pop the result of evaluating the first part.
      pop_and_continue.Bind();
      frame_->Pop();

      // Evaluate right side expression.
      is_true.Bind();
      Load(node->right());

      // Exit (always with a materialized value).
      exit.Bind();
    } else if (has_cc() || is_true.is_linked()) {
      // The left-hand side is either (a) partially compiled to
      // control flow with a final branch left to emit or (b) fully
      // compiled to control flow and possibly true.
      if (has_cc()) {
        Branch(false, false_target());
      }
      is_true.Bind();
      LoadCondition(node->right(), true_target(), false_target(), false);
    } else {
      // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
    }

  } else {
    ASSERT(node->op() == Token::OR);
    JumpTarget is_false;
    LoadCondition(node->left(), true_target(), &is_false, false);
    if (has_valid_frame() && !has_cc()) {
      // The left-hand side result is on top of the virtual frame.
      JumpTarget pop_and_continue;
      JumpTarget exit;

      frame_->Dup();
      // Avoid popping the result if it converts to 'true' using the
      // standard ToBoolean() conversion as described in ECMA-262,
      // section 9.2, page 30.
      ToBoolean(&exit, &pop_and_continue);
      Branch(true, &exit);

      // Pop the result of evaluating the first part.
      pop_and_continue.Bind();
      frame_->Pop();

      // Evaluate right side expression.
      is_false.Bind();
      Load(node->right());

      // Exit (always with a materialized value).
      exit.Bind();
    } else if (has_cc() || is_false.is_linked()) {
      // The left-hand side is either (a) partially compiled to
      // control flow with a final branch left to emit or (b) fully
      // compiled to control flow and possibly false.
      if (has_cc()) {
        Branch(true, true_target());
      }
      is_false.Bind();
      LoadCondition(node->right(), true_target(), false_target(), false);
    } else {
      // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
    }
  }
}
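
// Semantics being implemented above (added note, not in the original
// source): && and || yield one of their operand values, not a boolean, e.g.
//
//   'a' && 0     // 0   (left is truthy, result is the right value)
//   0 || 'b'     // 'b' (left is falsy, result is the right value)
//   'a' || 'b'   // 'a' (right side never evaluated)
//
// hence the Dup()/ToBoolean/Pop dance instead of a plain boolean test.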


void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ BinaryOperation");

  if (node->op() == Token::AND || node->op() == Token::OR) {
    GenerateLogicalBooleanOperation(node);
  } else {
    // Optimize for the case where (at least) one of the expressions
    // is a literal small integer.
    Literal* lliteral = node->left()->AsLiteral();
    Literal* rliteral = node->right()->AsLiteral();
    // NOTE: The code below assumes that the slow cases (calls to runtime)
    // never return a constant/immutable object.
    bool overwrite_left =
        (node->left()->AsBinaryOperation() != NULL &&
         node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
    bool overwrite_right =
        (node->right()->AsBinaryOperation() != NULL &&
         node->right()->AsBinaryOperation()->ResultOverwriteAllowed());

    if (rliteral != NULL && rliteral->handle()->IsSmi()) {
      VirtualFrame::RegisterAllocationScope scope(this);
      Load(node->left());
      if (frame_->KnownSmiAt(0)) overwrite_left = false;
      SmiOperation(node->op(),
                   rliteral->handle(),
                   false,
                   overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
    } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
      VirtualFrame::RegisterAllocationScope scope(this);
      Load(node->right());
      if (frame_->KnownSmiAt(0)) overwrite_right = false;
      SmiOperation(node->op(),
                   lliteral->handle(),
                   true,
                   overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
    } else {
      GenerateInlineSmi inline_smi =
          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
      if (lliteral != NULL) {
        ASSERT(!lliteral->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      if (rliteral != NULL) {
        ASSERT(!rliteral->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      VirtualFrame::RegisterAllocationScope scope(this);
      OverwriteMode overwrite_mode = NO_OVERWRITE;
      if (overwrite_left) {
        overwrite_mode = OVERWRITE_LEFT;
      } else if (overwrite_right) {
        overwrite_mode = OVERWRITE_RIGHT;
      }
      Load(node->left());
      Load(node->right());
      GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
    }
  }
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::VisitThisFunction(ThisFunction* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  frame_->EmitPush(MemOperand(frame_->Function()));
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ CompareOperation");

  VirtualFrame::RegisterAllocationScope nonspilled_scope(this);

  // Get the expressions from the node.
  Expression* left = node->left();
  Expression* right = node->right();
  Token::Value op = node->op();

  // To make null checks efficient, we check if either left or right is the
  // literal 'null'. If so, we optimize the code by inlining a null check
  // instead of calling the (very) general runtime routine for checking
  // equality.
  if (op == Token::EQ || op == Token::EQ_STRICT) {
    bool left_is_null =
        left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
    bool right_is_null =
        right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
    // The 'null' value can only be equal to 'null' or 'undefined'.
    if (left_is_null || right_is_null) {
      Load(left_is_null ? right : left);
      Register tos = frame_->PopToRegister();
      __ LoadRoot(ip, Heap::kNullValueRootIndex);
      __ cmp(tos, ip);

      // The 'null' value is only equal to 'undefined' if using non-strict
      // comparisons.
      if (op != Token::EQ_STRICT) {
        true_target()->Branch(eq);

        __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
        __ cmp(tos, Operand(ip));
        true_target()->Branch(eq);

        __ tst(tos, Operand(kSmiTagMask));
        false_target()->Branch(eq);

        // It can be an undetectable object.
        __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
        __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
        __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
        __ cmp(tos, Operand(1 << Map::kIsUndetectable));
      }

      cc_reg_ = eq;
      ASSERT(has_cc() && frame_->height() == original_height);
      return;
    }
  }

  // To make typeof testing for natives implemented in JavaScript really
  // efficient, we generate special code for expressions of the form:
  // 'typeof <expression> == <string>'.
  UnaryOperation* operation = left->AsUnaryOperation();
  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
      (operation != NULL && operation->op() == Token::TYPEOF) &&
      (right->AsLiteral() != NULL &&
       right->AsLiteral()->handle()->IsString())) {
    Handle<String> check(String::cast(*right->AsLiteral()->handle()));

    // Load the operand, move it to a register.
    LoadTypeofExpression(operation->expression());
    Register tos = frame_->PopToRegister();

    Register scratch = VirtualFrame::scratch0();

    if (check->Equals(Heap::number_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      true_target()->Branch(eq);
      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
      __ cmp(tos, ip);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::string_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));

      // It can be an undetectable string object.
      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
      false_target()->Branch(eq);

      __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
      __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
      cc_reg_ = lt;

    } else if (check->Equals(Heap::boolean_symbol())) {
      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
      __ cmp(tos, ip);
      true_target()->Branch(eq);
      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
      __ cmp(tos, ip);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::undefined_symbol())) {
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(tos, ip);
      true_target()->Branch(eq);

      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      // It can be an undetectable object.
      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));

      cc_reg_ = eq;

    } else if (check->Equals(Heap::function_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);
      Register map_reg = scratch;
      __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
      true_target()->Branch(eq);
      // Regular expressions are callable so typeof == 'function'.
      __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::object_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      __ LoadRoot(ip, Heap::kNullValueRootIndex);
      __ cmp(tos, ip);
      true_target()->Branch(eq);

      Register map_reg = scratch;
      __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
      false_target()->Branch(eq);

      // It can be an undetectable object.
      __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
      __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
      __ cmp(tos, Operand(1 << Map::kIsUndetectable));
      false_target()->Branch(eq);

      __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
      __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
      false_target()->Branch(lt);
      __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
      cc_reg_ = le;

    } else {
      // Uncommon case: typeof testing against a string literal that is
      // never returned from the typeof operator.
      false_target()->Jump();
    }
    ASSERT(!has_valid_frame() ||
           (has_cc() && frame_->height() == original_height));
    return;
  }

  switch (op) {
    case Token::EQ:
      Comparison(eq, left, right, false);
      break;

    case Token::LT:
      Comparison(lt, left, right);
      break;

    case Token::GT:
      Comparison(gt, left, right);
      break;

    case Token::LTE:
      Comparison(le, left, right);
      break;

    case Token::GTE:
      Comparison(ge, left, right);
      break;

    case Token::EQ_STRICT:
      Comparison(eq, left, right, true);
      break;

    case Token::IN: {
      Load(left);
      Load(right);
      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
      frame_->EmitPush(r0);
      break;
    }

    case Token::INSTANCEOF: {
      Load(left);
      Load(right);
      InstanceofStub stub;
      frame_->CallStub(&stub, 2);
      // At this point if instanceof succeeded then r0 == 0.
      __ tst(r0, Operand(r0));
      cc_reg_ = eq;
      break;
    }

    default:
      UNREACHABLE();
  }
  ASSERT((has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}
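
// JS-level effect of the typeof specialization above (added note, not in the
// original source): comparisons of the form
//
//   typeof x == 'number'     // smi check or HeapNumber map check
//   typeof x == 'function'   // JS_FUNCTION_TYPE or callable JS_REGEXP_TYPE
//   typeof x == 'banana'     // never true: unconditional jump to false
//
// compile straight to flag-setting checks, with no materialized typeof
// string and no generic comparison.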


class DeferredReferenceGetNamedValue: public DeferredCode {
 public:
  explicit DeferredReferenceGetNamedValue(Register receiver,
                                          Handle<String> name)
      : receiver_(receiver), name_(name) {
    set_comment("[ DeferredReferenceGetNamedValue");
  }

  virtual void Generate();

 private:
  Register receiver_;
  Handle<String> name_;
};


// Convention for this is that on entry the receiver is in a register that
// is not used by the stack.  On exit the answer is found in that same
// register and the stack has the same height.
void DeferredReferenceGetNamedValue::Generate() {
#ifdef DEBUG
  int expected_height = frame_state()->frame()->height();
#endif
  VirtualFrame copied_frame(*frame_state()->frame());
  copied_frame.SpillAll();

  Register scratch1 = VirtualFrame::scratch0();
  Register scratch2 = VirtualFrame::scratch1();
  ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
  __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
  __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);

  // Ensure receiver in r0 and name in r2 to match load ic calling convention.
  __ Move(r0, receiver_);
  __ mov(r2, Operand(name_));

  // The rest of the instructions in the deferred code must be together.
  { Assembler::BlockConstPoolScope block_const_pool(masm_);
    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
    __ Call(ic, RelocInfo::CODE_TARGET);
    // The call must be followed by a nop(1) instruction to indicate that the
    // in-object load has been inlined.
    __ nop(PROPERTY_ACCESS_INLINED);

    // At this point the answer is in r0.  We move it to the expected register
    // if necessary.
    __ Move(receiver_, r0);

    // Now go back to the frame that we entered with.  This will not overwrite
    // the receiver register since that register was not in use when we came
    // in.  The instructions emitted by this merge are skipped over by the
    // inline load patching mechanism when looking for the branch instruction
    // that tells it where the code to patch is.
    copied_frame.MergeTo(frame_state()->frame());

    // Block the constant pool for one more instruction after leaving this
    // constant pool block scope to include the branch instruction ending the
    // deferred code.
    __ BlockConstPoolFor(1);
  }
  ASSERT_EQ(expected_height, frame_state()->frame()->height());
}
6101
6102
6103class DeferredReferenceGetKeyedValue: public DeferredCode {
6104 public:
6105  DeferredReferenceGetKeyedValue(Register key, Register receiver)
6106      : key_(key), receiver_(receiver) {
6107    set_comment("[ DeferredReferenceGetKeyedValue");
6108  }
6109
6110  virtual void Generate();
6111
6112 private:
6113  Register key_;
6114  Register receiver_;
6115};
6116
6117
// Takes the key and the receiver in r0 and r1 or vice versa.  Returns the
// result in r0.
6120void DeferredReferenceGetKeyedValue::Generate() {
6121  ASSERT((key_.is(r0) && receiver_.is(r1)) ||
6122         (key_.is(r1) && receiver_.is(r0)));
6123
6124  VirtualFrame copied_frame(*frame_state()->frame());
6125  copied_frame.SpillAll();
6126
6127  Register scratch1 = VirtualFrame::scratch0();
6128  Register scratch2 = VirtualFrame::scratch1();
6129  __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
6130  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
6131
6132  // Ensure key in r0 and receiver in r1 to match keyed load ic calling
6133  // convention.
6134  if (key_.is(r1)) {
6135    __ Swap(r0, r1, ip);
6136  }
6137
6138  // The rest of the instructions in the deferred code must be together.
6139  { Assembler::BlockConstPoolScope block_const_pool(masm_);
6140    // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
6141    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
6142    __ Call(ic, RelocInfo::CODE_TARGET);
6143    // The call must be followed by a nop instruction to indicate that the
6144    // keyed load has been inlined.
6145    __ nop(PROPERTY_ACCESS_INLINED);
6146
6147    // Now go back to the frame that we entered with.  This will not overwrite
6148    // the receiver or key registers since they were not in use when we came
6149    // in.  The instructions emitted by this merge are skipped over by the
6150    // inline load patching mechanism when looking for the branch instruction
6151    // that tells it where the code to patch is.
6152    copied_frame.MergeTo(frame_state()->frame());
6153
6154    // Block the constant pool for one more instruction after leaving this
6155    // constant pool block scope to include the branch instruction ending the
6156    // deferred code.
6157    __ BlockConstPoolFor(1);
6158  }
6159}
6160
6161
6162class DeferredReferenceSetKeyedValue: public DeferredCode {
6163 public:
6164  DeferredReferenceSetKeyedValue(Register value,
6165                                 Register key,
6166                                 Register receiver)
6167      : value_(value), key_(key), receiver_(receiver) {
6168    set_comment("[ DeferredReferenceSetKeyedValue");
6169  }
6170
6171  virtual void Generate();
6172
6173 private:
6174  Register value_;
6175  Register key_;
6176  Register receiver_;
6177};
6178
6179
6180void DeferredReferenceSetKeyedValue::Generate() {
6181  Register scratch1 = VirtualFrame::scratch0();
6182  Register scratch2 = VirtualFrame::scratch1();
6183  __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
6184  __ IncrementCounter(
6185      &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
6186
6187  // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
6188  // calling convention.
6189  if (value_.is(r1)) {
6190    __ Swap(r0, r1, ip);
6191  }
6192  ASSERT(receiver_.is(r2));
6193
6194  // The rest of the instructions in the deferred code must be together.
6195  { Assembler::BlockConstPoolScope block_const_pool(masm_);
6196    // Call keyed store IC. It has the arguments value, key and receiver in r0,
6197    // r1 and r2.
6198    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
6199    __ Call(ic, RelocInfo::CODE_TARGET);
6200    // The call must be followed by a nop instruction to indicate that the
6201    // keyed store has been inlined.
6202    __ nop(PROPERTY_ACCESS_INLINED);
6203
6204    // Block the constant pool for one more instruction after leaving this
6205    // constant pool block scope to include the branch instruction ending the
6206    // deferred code.
6207    __ BlockConstPoolFor(1);
6208  }
6209}
6210
6211
6212class DeferredReferenceSetNamedValue: public DeferredCode {
6213 public:
6214  DeferredReferenceSetNamedValue(Register value,
6215                                 Register receiver,
6216                                 Handle<String> name)
6217      : value_(value), receiver_(receiver), name_(name) {
6218    set_comment("[ DeferredReferenceSetNamedValue");
6219  }
6220
6221  virtual void Generate();
6222
6223 private:
6224  Register value_;
6225  Register receiver_;
6226  Handle<String> name_;
6227};
6228
6229
6230// Takes value in r0, receiver in r1 and returns the result (the
6231// value) in r0.
6232void DeferredReferenceSetNamedValue::Generate() {
6233  // Record the entry frame and spill.
6234  VirtualFrame copied_frame(*frame_state()->frame());
6235  copied_frame.SpillAll();
6236
6237  // Ensure value in r0, receiver in r1 to match store ic calling
6238  // convention.
6239  ASSERT(value_.is(r0) && receiver_.is(r1));
6240  __ mov(r2, Operand(name_));
6241
6242  // The rest of the instructions in the deferred code must be together.
6243  { Assembler::BlockConstPoolScope block_const_pool(masm_);
    // Call the store IC. It takes the value in r0, the receiver in r1 and
    // the name in r2.
6246    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
6247    __ Call(ic, RelocInfo::CODE_TARGET);
6248    // The call must be followed by a nop instruction to indicate that the
6249    // named store has been inlined.
6250    __ nop(PROPERTY_ACCESS_INLINED);
6251
6252    // Go back to the frame we entered with. The instructions
6253    // generated by this merge are skipped over by the inline store
6254    // patching mechanism when looking for the branch instruction that
6255    // tells it where the code to patch is.
6256    copied_frame.MergeTo(frame_state()->frame());
6257
6258    // Block the constant pool for one more instruction after leaving this
6259    // constant pool block scope to include the branch instruction ending the
6260    // deferred code.
6261    __ BlockConstPoolFor(1);
6262  }
6263}
6264
6265
6266// Consumes the top of stack (the receiver) and pushes the result instead.
6267void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
6268  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
6269    Comment cmnt(masm(), "[ Load from named Property");
6270    // Setup the name register and call load IC.
6271    frame_->CallLoadIC(name,
6272                       is_contextual
6273                           ? RelocInfo::CODE_TARGET_CONTEXT
6274                           : RelocInfo::CODE_TARGET);
6275    frame_->EmitPush(r0);  // Push answer.
6276  } else {
6277    // Inline the in-object property case.
6278    Comment cmnt(masm(), "[ Inlined named property load");
6279
6280    // Counter will be decremented in the deferred code. Placed here to avoid
6281    // having it in the instruction stream below where patching will occur.
6282    __ IncrementCounter(&Counters::named_load_inline, 1,
6283                        frame_->scratch0(), frame_->scratch1());
6284
    // The following instructions are the inlined load of an in-object property.
    // Parts of this code are patched, so the exact instructions generated need
    // to be fixed. Therefore the constant pool is blocked while generating
    // this code.
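    //
    // As a sketch (illustrative only, not emitted literally), the patchable
    // sequence below assembles to seven instructions, matching
    // kInlinedNamedLoadInstructions:
    //
    //   tst receiver, #kSmiTagMask       @ smi check
    //   b   eq, <deferred>
    //   ldr scratch, [receiver, #map]
    //   mov scratch2, <null value>       @ patched to the receiver's real map
    //   cmp scratch, scratch2
    //   b   ne, <deferred>
    //   ldr receiver, [receiver, #0]     @ offset patched to the field offset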
6289
6290    // Load the receiver from the stack.
6291    Register receiver = frame_->PopToRegister();
6292
6293    DeferredReferenceGetNamedValue* deferred =
6294        new DeferredReferenceGetNamedValue(receiver, name);
6295
6296#ifdef DEBUG
6297    int kInlinedNamedLoadInstructions = 7;
6298    Label check_inlined_codesize;
6299    masm_->bind(&check_inlined_codesize);
6300#endif
6301
6302    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6303      // Check that the receiver is a heap object.
6304      __ tst(receiver, Operand(kSmiTagMask));
6305      deferred->Branch(eq);
6306
6307      Register scratch = VirtualFrame::scratch0();
6308      Register scratch2 = VirtualFrame::scratch1();
6309
6310      // Check the map. The null map used below is patched by the inline cache
6311      // code.  Therefore we can't use a LoadRoot call.
6312      __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
6313      __ mov(scratch2, Operand(Factory::null_value()));
6314      __ cmp(scratch, scratch2);
6315      deferred->Branch(ne);
6316
6317      // Initially use an invalid index. The index will be patched by the
6318      // inline cache code.
6319      __ ldr(receiver, MemOperand(receiver, 0));
6320
6321      // Make sure that the expected number of instructions are generated.
6322      ASSERT_EQ(kInlinedNamedLoadInstructions,
6323                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
6324    }
6325
6326    deferred->BindExit();
6327    // At this point the receiver register has the result, either from the
6328    // deferred code or from the inlined code.
6329    frame_->EmitPush(receiver);
6330  }
6331}
6332
6333
6334void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
6335#ifdef DEBUG
6336  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
6337#endif
6338
6339  Result result;
6340  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
6341    frame()->CallStoreIC(name, is_contextual);
6342  } else {
6343    // Inline the in-object property case.
6344    JumpTarget slow, done;
6345
6346    // Get the value and receiver from the stack.
6347    frame()->PopToR0();
6348    Register value = r0;
6349    frame()->PopToR1();
6350    Register receiver = r1;
6351
6352    DeferredReferenceSetNamedValue* deferred =
6353        new DeferredReferenceSetNamedValue(value, receiver, name);
6354
6355    // Check that the receiver is a heap object.
6356    __ tst(receiver, Operand(kSmiTagMask));
6357    deferred->Branch(eq);
6358
    // The following instructions are part of the inlined
    // in-object property store code which can be patched. Therefore
    // the exact number of instructions generated must be fixed, so
    // the constant pool is blocked while generating this code.
6363    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6364      Register scratch0 = VirtualFrame::scratch0();
6365      Register scratch1 = VirtualFrame::scratch1();
6366
6367      // Check the map. Initially use an invalid map to force a
6368      // failure. The map check will be patched in the runtime system.
6369      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
6370
6371#ifdef DEBUG
6372      Label check_inlined_codesize;
6373      masm_->bind(&check_inlined_codesize);
6374#endif
6375      __ mov(scratch0, Operand(Factory::null_value()));
6376      __ cmp(scratch0, scratch1);
6377      deferred->Branch(ne);
6378
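      // The offset starts out as zero here and, like the map check above, is
      // patched later with the real field offset.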
6379      int offset = 0;
6380      __ str(value, MemOperand(receiver, offset));
6381
6382      // Update the write barrier and record its size. We do not use
6383      // the RecordWrite macro here because we want the offset
6384      // addition instruction first to make it easy to patch.
6385      Label record_write_start, record_write_done;
6386      __ bind(&record_write_start);
6387      // Add offset into the object.
6388      __ add(scratch0, receiver, Operand(offset));
6389      // Test that the object is not in the new space.  We cannot set
6390      // region marks for new space pages.
6391      __ InNewSpace(receiver, scratch1, eq, &record_write_done);
6392      // Record the actual write.
6393      __ RecordWriteHelper(receiver, scratch0, scratch1);
6394      __ bind(&record_write_done);
6395      // Clobber all input registers when running with the debug-code flag
6396      // turned on to provoke errors.
6397      if (FLAG_debug_code) {
6398        __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
6399        __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
6400        __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
6401      }
6402      // Check that this is the first inlined write barrier or that
6403      // this inlined write barrier has the same size as all the other
6404      // inlined write barriers.
6405      ASSERT((inlined_write_barrier_size_ == -1) ||
6406             (inlined_write_barrier_size_ ==
6407              masm()->InstructionsGeneratedSince(&record_write_start)));
6408      inlined_write_barrier_size_ =
6409          masm()->InstructionsGeneratedSince(&record_write_start);
6410
6411      // Make sure that the expected number of instructions are generated.
6412      ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
6413                masm()->InstructionsGeneratedSince(&check_inlined_codesize));
6414    }
6415    deferred->BindExit();
6416  }
6417  ASSERT_EQ(expected_height, frame()->height());
6418}
6419
6420
6421void CodeGenerator::EmitKeyedLoad() {
6422  if (loop_nesting() == 0) {
6423    Comment cmnt(masm_, "[ Load from keyed property");
6424    frame_->CallKeyedLoadIC();
6425  } else {
6426    // Inline the keyed load.
6427    Comment cmnt(masm_, "[ Inlined load from keyed property");
6428
6429    // Counter will be decremented in the deferred code. Placed here to avoid
6430    // having it in the instruction stream below where patching will occur.
6431    __ IncrementCounter(&Counters::keyed_load_inline, 1,
6432                        frame_->scratch0(), frame_->scratch1());
6433
6434    // Load the key and receiver from the stack.
6435    bool key_is_known_smi = frame_->KnownSmiAt(0);
6436    Register key = frame_->PopToRegister();
6437    Register receiver = frame_->PopToRegister(key);
6438
6439    // The deferred code expects key and receiver in registers.
6440    DeferredReferenceGetKeyedValue* deferred =
6441        new DeferredReferenceGetKeyedValue(key, receiver);
6442
6443    // Check that the receiver is a heap object.
6444    __ tst(receiver, Operand(kSmiTagMask));
6445    deferred->Branch(eq);
6446
    // The following instructions are part of the inlined load keyed
    // property code which can be patched. Therefore the exact number of
    // instructions generated needs to be fixed, so the constant pool is
    // blocked while generating this code.
6451    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6452      Register scratch1 = VirtualFrame::scratch0();
6453      Register scratch2 = VirtualFrame::scratch1();
6454      // Check the map. The null map used below is patched by the inline cache
6455      // code.
6456      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
6457
6458      // Check that the key is a smi.
6459      if (!key_is_known_smi) {
6460        __ tst(key, Operand(kSmiTagMask));
6461        deferred->Branch(ne);
6462      }
6463
6464#ifdef DEBUG
6465      Label check_inlined_codesize;
6466      masm_->bind(&check_inlined_codesize);
6467#endif
6468      __ mov(scratch2, Operand(Factory::null_value()));
6469      __ cmp(scratch1, scratch2);
6470      deferred->Branch(ne);
6471
6472      // Get the elements array from the receiver and check that it
6473      // is not a dictionary.
6474      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
6475      if (FLAG_debug_code) {
6476        __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
6477        __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
6478        __ cmp(scratch2, ip);
6479        __ Assert(eq, "JSObject with fast elements map has slow elements");
6480      }
6481
6482      // Check that key is within bounds. Use unsigned comparison to handle
6483      // negative keys.
6484      __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
6485      __ cmp(scratch2, key);
6486      deferred->Branch(ls);  // Unsigned less equal.
6487
6488      // Load and check that the result is not the hole (key is a smi).
6489      __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
6490      __ add(scratch1,
6491             scratch1,
6492             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
6493      __ ldr(scratch1,
6494             MemOperand(scratch1, key, LSL,
6495                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
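      // (The key is a smi, i.e. twice the index with the 1-bit smi tag, so
      // the shift above scales it to index * kPointerSize: LSL #1 on ARM.)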
6496      __ cmp(scratch1, scratch2);
6497      deferred->Branch(eq);
6498
6499      __ mov(r0, scratch1);
6500      // Make sure that the expected number of instructions are generated.
6501      ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
6502                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
6503    }
6504
6505    deferred->BindExit();
6506  }
6507}
6508
6509
6510void CodeGenerator::EmitKeyedStore(StaticType* key_type,
6511                                   WriteBarrierCharacter wb_info) {
6512  // Generate inlined version of the keyed store if the code is in a loop
6513  // and the key is likely to be a smi.
6514  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
6515    // Inline the keyed store.
6516    Comment cmnt(masm_, "[ Inlined store to keyed property");
6517
6518    Register scratch1 = VirtualFrame::scratch0();
6519    Register scratch2 = VirtualFrame::scratch1();
6520    Register scratch3 = r3;
6521
6522    // Counter will be decremented in the deferred code. Placed here to avoid
6523    // having it in the instruction stream below where patching will occur.
6524    __ IncrementCounter(&Counters::keyed_store_inline, 1,
6525                        scratch1, scratch2);
6526
6529    // Load the value, key and receiver from the stack.
6530    bool value_is_harmless = frame_->KnownSmiAt(0);
6531    if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
6532    bool key_is_smi = frame_->KnownSmiAt(1);
6533    Register value = frame_->PopToRegister();
6534    Register key = frame_->PopToRegister(value);
6535    VirtualFrame::SpilledScope spilled(frame_);
6536    Register receiver = r2;
6537    frame_->EmitPop(receiver);
6538
6539#ifdef DEBUG
6540    bool we_remembered_the_write_barrier = value_is_harmless;
6541#endif
6542
6543    // The deferred code expects value, key and receiver in registers.
6544    DeferredReferenceSetKeyedValue* deferred =
6545        new DeferredReferenceSetKeyedValue(value, key, receiver);
6546
6547    // Check that the value is a smi. As this inlined code does not set the
6548    // write barrier it is only possible to store smi values.
6549    if (!value_is_harmless) {
6550      // If the value is not likely to be a Smi then let's test the fixed array
6551      // for new space instead.  See below.
6552      if (wb_info == LIKELY_SMI) {
6553        __ tst(value, Operand(kSmiTagMask));
6554        deferred->Branch(ne);
6555#ifdef DEBUG
6556        we_remembered_the_write_barrier = true;
6557#endif
6558      }
6559    }
6560
6561    if (!key_is_smi) {
6562      // Check that the key is a smi.
6563      __ tst(key, Operand(kSmiTagMask));
6564      deferred->Branch(ne);
6565    }
6566
6567    // Check that the receiver is a heap object.
6568    __ tst(receiver, Operand(kSmiTagMask));
6569    deferred->Branch(eq);
6570
6571    // Check that the receiver is a JSArray.
6572    __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
6573    deferred->Branch(ne);
6574
6575    // Check that the key is within bounds. Both the key and the length of
6576    // the JSArray are smis. Use unsigned comparison to handle negative keys.
6577    __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
6578    __ cmp(scratch1, key);
6579    deferred->Branch(ls);  // Unsigned less equal.
6580
6581    // Get the elements array from the receiver.
6582    __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
6583    if (!value_is_harmless && wb_info != LIKELY_SMI) {
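      // If the elements array is in new space no write barrier is needed
      // (region marks cannot be set for new space pages), so any value may be
      // stored; otherwise the value must be a smi.  The smi test is executed
      // conditionally (ne), i.e. only when the cmp below finds the array
      // outside new space.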
6585      __ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask()));
6586      __ cmp(scratch2, Operand(ExternalReference::new_space_start()));
6587      __ tst(value, Operand(kSmiTagMask), ne);
6588      deferred->Branch(ne);
6589#ifdef DEBUG
6590      we_remembered_the_write_barrier = true;
6591#endif
6592    }
6593    // Check that the elements array is not a dictionary.
6594    __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
    // The following instructions are part of the inlined store keyed
    // property code which can be patched. Therefore the exact number of
    // instructions generated needs to be fixed, so the constant pool is
    // blocked while generating this code.
6599    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6600#ifdef DEBUG
6601      Label check_inlined_codesize;
6602      masm_->bind(&check_inlined_codesize);
6603#endif
6604
6605      // Read the fixed array map from the constant pool (not from the root
6606      // array) so that the value can be patched.  When debugging, we patch this
6607      // comparison to always fail so that we will hit the IC call in the
6608      // deferred code which will allow the debugger to break for fast case
6609      // stores.
6610      __ mov(scratch3, Operand(Factory::fixed_array_map()));
6611      __ cmp(scratch2, scratch3);
6612      deferred->Branch(ne);
6613
6614      // Store the value.
6615      __ add(scratch1, scratch1,
6616             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
6617      __ str(value,
6618             MemOperand(scratch1, key, LSL,
6619                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
6620
6621      // Make sure that the expected number of instructions are generated.
6622      ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
6623                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
6624    }
6625
6626    ASSERT(we_remembered_the_write_barrier);
6627
6628    deferred->BindExit();
6629  } else {
6630    frame()->CallKeyedStoreIC();
6631  }
6632}
6633
6634
6635#ifdef DEBUG
6636bool CodeGenerator::HasValidEntryRegisters() { return true; }
6637#endif
6638
6639
6640#undef __
6641#define __ ACCESS_MASM(masm)
6642
6643Handle<String> Reference::GetName() {
6644  ASSERT(type_ == NAMED);
6645  Property* property = expression_->AsProperty();
6646  if (property == NULL) {
6647    // Global variable reference treated as a named property reference.
6648    VariableProxy* proxy = expression_->AsVariableProxy();
6649    ASSERT(proxy->AsVariable() != NULL);
6650    ASSERT(proxy->AsVariable()->is_global());
6651    return proxy->name();
6652  } else {
6653    Literal* raw_name = property->key()->AsLiteral();
6654    ASSERT(raw_name != NULL);
6655    return Handle<String>(String::cast(*raw_name->handle()));
6656  }
6657}
6658
6659
6660void Reference::DupIfPersist() {
6661  if (persist_after_get_) {
6662    switch (type_) {
6663      case KEYED:
6664        cgen_->frame()->Dup2();
6665        break;
6666      case NAMED:
6667        cgen_->frame()->Dup();
6668        // Fall through.
6669      case UNLOADED:
6670      case ILLEGAL:
6671      case SLOT:
6672        // Do nothing.
6673        ;
6674    }
6675  } else {
6676    set_unloaded();
6677  }
6678}
6679
6680
6681void Reference::GetValue() {
6682  ASSERT(cgen_->HasValidEntryRegisters());
6683  ASSERT(!is_illegal());
6684  ASSERT(!cgen_->has_cc());
6685  MacroAssembler* masm = cgen_->masm();
6686  Property* property = expression_->AsProperty();
6687  if (property != NULL) {
6688    cgen_->CodeForSourcePosition(property->position());
6689  }
6690
6691  switch (type_) {
6692    case SLOT: {
6693      Comment cmnt(masm, "[ Load from Slot");
6694      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6695      ASSERT(slot != NULL);
6696      DupIfPersist();
6697      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
6698      break;
6699    }
6700
6701    case NAMED: {
6702      Variable* var = expression_->AsVariableProxy()->AsVariable();
6703      bool is_global = var != NULL;
6704      ASSERT(!is_global || var->is_global());
6705      Handle<String> name = GetName();
6706      DupIfPersist();
6707      cgen_->EmitNamedLoad(name, is_global);
6708      break;
6709    }
6710
6711    case KEYED: {
6712      ASSERT(property != NULL);
6713      DupIfPersist();
6714      cgen_->EmitKeyedLoad();
6715      cgen_->frame()->EmitPush(r0);
6716      break;
6717    }
6718
6719    default:
6720      UNREACHABLE();
6721  }
6722}
6723
6724
6725void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
6726  ASSERT(!is_illegal());
6727  ASSERT(!cgen_->has_cc());
6728  MacroAssembler* masm = cgen_->masm();
6729  VirtualFrame* frame = cgen_->frame();
6730  Property* property = expression_->AsProperty();
6731  if (property != NULL) {
6732    cgen_->CodeForSourcePosition(property->position());
6733  }
6734
6735  switch (type_) {
6736    case SLOT: {
6737      Comment cmnt(masm, "[ Store to Slot");
6738      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6739      cgen_->StoreToSlot(slot, init_state);
6740      set_unloaded();
6741      break;
6742    }
6743
6744    case NAMED: {
6745      Comment cmnt(masm, "[ Store to named Property");
6746      cgen_->EmitNamedStore(GetName(), false);
6747      frame->EmitPush(r0);
6748      set_unloaded();
6749      break;
6750    }
6751
6752    case KEYED: {
6753      Comment cmnt(masm, "[ Store to keyed Property");
6754      Property* property = expression_->AsProperty();
6755      ASSERT(property != NULL);
6756      cgen_->CodeForSourcePosition(property->position());
6757      cgen_->EmitKeyedStore(property->key()->type(), wb_info);
6758      frame->EmitPush(r0);
6759      set_unloaded();
6760      break;
6761    }
6762
6763    default:
6764      UNREACHABLE();
6765  }
6766}
6767
6768
6769void FastNewClosureStub::Generate(MacroAssembler* masm) {
6770  // Create a new closure from the given function info in new
6771  // space. Set the context to the current context in cp.
6772  Label gc;
6773
6774  // Pop the function info from the stack.
6775  __ pop(r3);
6776
6777  // Attempt to allocate new JSFunction in new space.
6778  __ AllocateInNewSpace(JSFunction::kSize,
6779                        r0,
6780                        r1,
6781                        r2,
6782                        &gc,
6783                        TAG_OBJECT);
6784
6785  // Compute the function map in the current global context and set that
6786  // as the map of the allocated object.
6787  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
6788  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
6789  __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
6790  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
6791
6792  // Initialize the rest of the function. We don't have to update the
6793  // write barrier because the allocated object is in new space.
6794  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
6795  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
6796  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
6797  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
6798  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
6799  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
6800  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
6801  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
6802
6803  // Return result. The argument function info has been popped already.
6804  __ Ret();
6805
6806  // Create a new closure through the slower runtime call.
6807  __ bind(&gc);
6808  __ Push(cp, r3);
6809  __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
6810}
6811
6812
6813void FastNewContextStub::Generate(MacroAssembler* masm) {
6814  // Try to allocate the context in new space.
6815  Label gc;
6816  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
6817
6818  // Attempt to allocate the context in new space.
6819  __ AllocateInNewSpace(FixedArray::SizeFor(length),
6820                        r0,
6821                        r1,
6822                        r2,
6823                        &gc,
6824                        TAG_OBJECT);
6825
6826  // Load the function from the stack.
6827  __ ldr(r3, MemOperand(sp, 0));
6828
6829  // Setup the object header.
6830  __ LoadRoot(r2, Heap::kContextMapRootIndex);
6831  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
6832  __ mov(r2, Operand(Smi::FromInt(length)));
6833  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
6834
6835  // Setup the fixed slots.
6836  __ mov(r1, Operand(Smi::FromInt(0)));
6837  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
6838  __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
6839  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
6840  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
6841
6842  // Copy the global object from the surrounding context.
6843  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
6844  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
6845
6846  // Initialize the rest of the slots to undefined.
6847  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
6848  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
6849    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
6850  }
6851
6852  // Remove the on-stack argument and return.
6853  __ mov(cp, r0);
6854  __ pop();
6855  __ Ret();
6856
6857  // Need to collect. Call into runtime system.
6858  __ bind(&gc);
6859  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
6860}
6861
6862
6863void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
6864  // Stack layout on entry:
6865  //
6866  // [sp]: constant elements.
6867  // [sp + kPointerSize]: literal index.
6868  // [sp + (2 * kPointerSize)]: literals array.
6869
6870  // All sizes here are multiples of kPointerSize.
6871  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
6872  int size = JSArray::kSize + elements_size;
6873
6874  // Load boilerplate object into r3 and check if we need to create a
6875  // boilerplate.
6876  Label slow_case;
6877  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
6878  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
6879  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
6880  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
6881  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
6882  __ cmp(r3, ip);
6883  __ b(eq, &slow_case);
6884
6885  // Allocate both the JS array and the elements array in one big
6886  // allocation. This avoids multiple limit checks.
6887  __ AllocateInNewSpace(size,
6888                        r0,
6889                        r1,
6890                        r2,
6891                        &slow_case,
6892                        TAG_OBJECT);
6893
6894  // Copy the JS array part.
6895  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
6896    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
6897      __ ldr(r1, FieldMemOperand(r3, i));
6898      __ str(r1, FieldMemOperand(r0, i));
6899    }
6900  }
6901
6902  if (length_ > 0) {
6903    // Get hold of the elements array of the boilerplate and setup the
6904    // elements pointer in the resulting object.
6905    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
6906    __ add(r2, r0, Operand(JSArray::kSize));
6907    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
6908
6909    // Copy the elements array.
6910    for (int i = 0; i < elements_size; i += kPointerSize) {
6911      __ ldr(r1, FieldMemOperand(r3, i));
6912      __ str(r1, FieldMemOperand(r2, i));
6913    }
6914  }
6915
6916  // Return and remove the on-stack parameters.
6917  __ add(sp, sp, Operand(3 * kPointerSize));
6918  __ Ret();
6919
6920  __ bind(&slow_case);
6921  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
6922}
6923
6924
6925// Takes a Smi and converts to an IEEE 64 bit floating point value in two
6926// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
6927// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
6928// scratch register.  Destroys the source register.  No GC occurs during this
6929// stub so you don't have to set up the frame.
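// For example (illustrative only): the Smi 5 becomes 5.0 = 1.25 * 2^2, so
// the result words are 0x40140000 (sign 0, biased exponent 0x401 = 1025, top
// mantissa bits 0x40000) and 0x00000000 for the low 32 mantissa bits.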
6930class ConvertToDoubleStub : public CodeStub {
6931 public:
6932  ConvertToDoubleStub(Register result_reg_1,
6933                      Register result_reg_2,
6934                      Register source_reg,
6935                      Register scratch_reg)
6936      : result1_(result_reg_1),
6937        result2_(result_reg_2),
6938        source_(source_reg),
6939        zeros_(scratch_reg) { }
6940
6941 private:
6942  Register result1_;
6943  Register result2_;
6944  Register source_;
6945  Register zeros_;
6946
6947  // Minor key encoding in 16 bits.
6948  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
6949  class OpBits: public BitField<Token::Value, 2, 14> {};
6950
6951  Major MajorKey() { return ConvertToDouble; }
6952  int MinorKey() {
6953    // Encode the parameters in a unique 16 bit value.
6954    return  result1_.code() +
6955           (result2_.code() << 4) +
6956           (source_.code() << 8) +
6957           (zeros_.code() << 12);
6958  }
6959
6960  void Generate(MacroAssembler* masm);
6961
6962  const char* GetName() { return "ConvertToDoubleStub"; }
6963
6964#ifdef DEBUG
6965  void Print() { PrintF("ConvertToDoubleStub\n"); }
6966#endif
6967};
6968
6969
6970void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
6971#ifndef BIG_ENDIAN_FLOATING_POINT
6972  Register exponent = result1_;
6973  Register mantissa = result2_;
6974#else
6975  Register exponent = result2_;
6976  Register mantissa = result1_;
6977#endif
6978  Label not_special;
6979  // Convert from Smi to integer.
6980  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
6981  // Move sign bit from source to destination.  This works because the sign bit
6982  // in the exponent word of the double has the same position and polarity as
6983  // the 2's complement sign bit in a Smi.
6984  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
6985  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
6986  // Subtract from 0 if source was negative.
6987  __ rsb(source_, source_, Operand(0), LeaveCC, ne);
6988
6989  // We have -1, 0 or 1, which we treat specially. Register source_ contains
6990  // absolute value: it is either equal to 1 (special case of -1 and 1),
6991  // greater than 1 (not a special case) or less than 1 (special case of 0).
6992  __ cmp(source_, Operand(1));
6993  __ b(gt, &not_special);
6994
6995  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
6996  static const uint32_t exponent_word_for_1 =
6997      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
6998  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
6999  // 1, 0 and -1 all have 0 for the second word.
7000  __ mov(mantissa, Operand(0));
7001  __ Ret();
7002
7003  __ bind(&not_special);
7004  // Count leading zeros.  Uses mantissa for a scratch register on pre-ARM5.
7005  // Gets the wrong answer for 0, but we already checked for that case above.
7006  __ CountLeadingZeros(zeros_, source_, mantissa);
7007  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.  Use a fudge factor to
  // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
  // that fit in the ARM's constant field.
7011  int fudge = 0x400;
7012  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
7013  __ add(mantissa, mantissa, Operand(fudge));
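  // (For example, if source_ was 5 then zeros_ is 29, and this computes
  // (30 - 29) + 0x400 = 0x401, the biased exponent of 5.0.  Both constants
  // fit in an ARM immediate operand.)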
7014  __ orr(exponent,
7015         exponent,
7016         Operand(mantissa, LSL, HeapNumber::kExponentShift));
7017  // Shift up the source chopping the top bit off.
7018  __ add(zeros_, zeros_, Operand(1));
7019  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
7020  __ mov(source_, Operand(source_, LSL, zeros_));
7021  // Compute lower part of fraction (last 12 bits).
7022  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
7023  // And the top (top 20 bits).
7024  __ orr(exponent,
7025         exponent,
7026         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
7027  __ Ret();
7028}
7029
7030
7031// See comment for class.
7032void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
7033  Label max_negative_int;
7034  // the_int_ has the answer which is a signed int32 but not a Smi.
7035  // We test for the special value that has a different exponent.  This test
7036  // has the neat side effect of setting the flags according to the sign.
7037  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
7038  __ cmp(the_int_, Operand(0x80000000u));
7039  __ b(eq, &max_negative_int);
  // Set up the correct exponent in scratch_.  All non-Smi int32s have the
  // same exponent: a non-Smi integer is 1.xxx * 2^30, so the exponent is 30
  // (biased).
7042  uint32_t non_smi_exponent =
7043      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
7044  __ mov(scratch_, Operand(non_smi_exponent));
7045  // Set the sign bit in scratch_ if the value was negative.
7046  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
7047  // Subtract from 0 if the value was negative.
7048  __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
7053  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
7054  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
7055  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
7056  __ str(scratch_, FieldMemOperand(the_heap_number_,
7057                                   HeapNumber::kExponentOffset));
7058  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
7059  __ str(scratch_, FieldMemOperand(the_heap_number_,
7060                                   HeapNumber::kMantissaOffset));
7061  __ Ret();
7062
7063  __ bind(&max_negative_int);
7064  // The max negative int32 is stored as a positive number in the mantissa of
7065  // a double because it uses a sign bit instead of using two's complement.
7066  // The actual mantissa bits stored are all 0 because the implicit most
7067  // significant 1 bit is not stored.
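  // For example: -2^31 is -1.0 * 2^31, so the word written below is
  // 0xC1E00000 (the sign bit plus the biased exponent 1054) followed by an
  // all-zero mantissa word.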
7068  non_smi_exponent += 1 << HeapNumber::kExponentShift;
7069  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
7070  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
7071  __ mov(ip, Operand(0));
7072  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
7073  __ Ret();
7074}
7075
7076
7077// Handle the case where the lhs and rhs are the same object.
7078// Equality is almost reflexive (everything but NaN), so this is a test
7079// for "identity and not NaN".
7080static void EmitIdenticalObjectComparison(MacroAssembler* masm,
7081                                          Label* slow,
7082                                          Condition cc,
7083                                          bool never_nan_nan) {
7084  Label not_identical;
7085  Label heap_number, return_equal;
7086  __ cmp(r0, r1);
7087  __ b(ne, &not_identical);
7088
7089  // The two objects are identical.  If we know that one of them isn't NaN then
7090  // we now know they test equal.
7091  if (cc != eq || !never_nan_nan) {
    // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are identical and they are not both Smis, so neither of them is a
    // Smi.  If it's not a heap number, then return equal.
7096    if (cc == lt || cc == gt) {
7097      __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
7098      __ b(ge, slow);
7099    } else {
7100      __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
7101      __ b(eq, &heap_number);
7102      // Comparing JS objects with <=, >= is complicated.
7103      if (cc != eq) {
7104        __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
7105        __ b(ge, slow);
7106        // Normally here we fall through to return_equal, but undefined is
7107        // special: (undefined == undefined) == true, but
7108        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
7109        if (cc == le || cc == ge) {
7110          __ cmp(r4, Operand(ODDBALL_TYPE));
7111          __ b(ne, &return_equal);
7112          __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
7113          __ cmp(r0, r2);
7114          __ b(ne, &return_equal);
7115          if (cc == le) {
7116            // undefined <= undefined should fail.
7117            __ mov(r0, Operand(GREATER));
          } else {
7119            // undefined >= undefined should fail.
7120            __ mov(r0, Operand(LESS));
7121          }
7122          __ Ret();
7123        }
7124      }
7125    }
7126  }
7127
7128  __ bind(&return_equal);
7129  if (cc == lt) {
7130    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
7131  } else if (cc == gt) {
7132    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
7133  } else {
7134    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
7135  }
7136  __ Ret();
7137
7138  if (cc != eq || !never_nan_nan) {
7139    // For less and greater we don't have to check for NaN since the result of
7140    // x < x is false regardless.  For the others here is some code to check
7141    // for NaN.
7142    if (cc != lt && cc != gt) {
7143      __ bind(&heap_number);
7144      // It is a heap number, so return non-equal if it's NaN and equal if it's
7145      // not NaN.
7146
7147      // The representation of NaN values has all exponent bits (52..62) set,
7148      // and not all mantissa bits (0..51) clear.
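      // (For example, 0x7FF0000000000000 is +Infinity while
      // 0x7FF8000000000000 is a quiet NaN: same all-one exponent, but a
      // non-zero mantissa.)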
7149      // Read top bits of double representation (second word of value).
7150      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
7151      // Test that exponent bits are all set.
7152      __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
7153      // NaNs have all-one exponents so they sign extend to -1.
7154      __ cmp(r3, Operand(-1));
7155      __ b(ne, &return_equal);
7156
7157      // Shift out flag and all exponent bits, retaining only mantissa.
7158      __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
7159      // Or with all low-bits of mantissa.
7160      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
7161      __ orr(r0, r3, Operand(r2), SetCC);
7162      // For equal we already have the right value in r0:  Return zero (equal)
7163      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
7164      // not (it's a NaN).  For <= and >= we need to load r0 with the failing
7165      // value if it's a NaN.
7166      if (cc != eq) {
7167        // All-zero means Infinity means equal.
7168        __ Ret(eq);
7169        if (cc == le) {
7170          __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
7171        } else {
7172          __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
7173        }
7174      }
7175      __ Ret();
7176    }
7177    // No fall through here.
7178  }
7179
7180  __ bind(&not_identical);
7181}
7182
7183
7184// See comment at call site.
7185static void EmitSmiNonsmiComparison(MacroAssembler* masm,
7186                                    Register lhs,
7187                                    Register rhs,
7188                                    Label* lhs_not_nan,
7189                                    Label* slow,
7190                                    bool strict) {
7191  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
7192         (lhs.is(r1) && rhs.is(r0)));
7193
7194  Label rhs_is_smi;
7195  __ tst(rhs, Operand(kSmiTagMask));
7196  __ b(eq, &rhs_is_smi);
7197
7198  // Lhs is a Smi.  Check whether the rhs is a heap number.
7199  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
7200  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed.  Return non-equal.
    // If rhs is r0 then there is already a non-zero value in it.
7204    if (!rhs.is(r0)) {
7205      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
7206    }
7207    __ Ret(ne);
7208  } else {
7209    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
7210    // the runtime.
7211    __ b(ne, slow);
7212  }
7213
7214  // Lhs is a smi, rhs is a number.
7215  if (CpuFeatures::IsSupported(VFP3)) {
7216    // Convert lhs to a double in d7.
7217    CpuFeatures::Scope scope(VFP3);
7218    __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
7219    // Load the double from rhs, tagged HeapNumber r0, to d6.
7220    __ sub(r7, rhs, Operand(kHeapObjectTag));
7221    __ vldr(d6, r7, HeapNumber::kValueOffset);
7222  } else {
7223    __ push(lr);
7224    // Convert lhs to a double in r2, r3.
7225    __ mov(r7, Operand(lhs));
7226    ConvertToDoubleStub stub1(r3, r2, r7, r6);
7227    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
7228    // Load rhs to a double in r0, r1.
7229    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
7230    __ pop(lr);
7231  }
7232
7233  // We now have both loaded as doubles but we can skip the lhs nan check
7234  // since it's a smi.
7235  __ jmp(lhs_not_nan);
7236
7237  __ bind(&rhs_is_smi);
7238  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
7239  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
7240  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed.  Return non-equal.
    // If lhs is r0 then there is already a non-zero value in it.
7244    if (!lhs.is(r0)) {
7245      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
7246    }
7247    __ Ret(ne);
7248  } else {
7249    // Smi compared non-strictly with a non-smi non-heap-number.  Call
7250    // the runtime.
7251    __ b(ne, slow);
7252  }
7253
7254  // Rhs is a smi, lhs is a heap number.
7255  if (CpuFeatures::IsSupported(VFP3)) {
7256    CpuFeatures::Scope scope(VFP3);
7257    // Load the double from lhs, tagged HeapNumber r1, to d7.
7258    __ sub(r7, lhs, Operand(kHeapObjectTag));
7259    __ vldr(d7, r7, HeapNumber::kValueOffset);
    // Convert rhs to a double in d6.
7261    __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
7262  } else {
7263    __ push(lr);
7264    // Load lhs to a double in r2, r3.
7265    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
7266    // Convert rhs to a double in r0, r1.
7267    __ mov(r7, Operand(rhs));
7268    ConvertToDoubleStub stub2(r1, r0, r7, r6);
7269    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
7270    __ pop(lr);
7271  }
7272  // Fall through to both_loaded_as_doubles.
7273}
7274
7275
7276void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
7277  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
7278  Register rhs_exponent = exp_first ? r0 : r1;
7279  Register lhs_exponent = exp_first ? r2 : r3;
7280  Register rhs_mantissa = exp_first ? r1 : r0;
7281  Register lhs_mantissa = exp_first ? r3 : r2;
7282  Label one_is_nan, neither_is_nan;
7283
7284  __ Sbfx(r4,
7285          lhs_exponent,
7286          HeapNumber::kExponentShift,
7287          HeapNumber::kExponentBits);
7288  // NaNs have all-one exponents so they sign extend to -1.
7289  __ cmp(r4, Operand(-1));
7290  __ b(ne, lhs_not_nan);
7291  __ mov(r4,
7292         Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
7293         SetCC);
7294  __ b(ne, &one_is_nan);
7295  __ cmp(lhs_mantissa, Operand(0));
7296  __ b(ne, &one_is_nan);
7297
7298  __ bind(lhs_not_nan);
7299  __ Sbfx(r4,
7300          rhs_exponent,
7301          HeapNumber::kExponentShift,
7302          HeapNumber::kExponentBits);
7303  // NaNs have all-one exponents so they sign extend to -1.
7304  __ cmp(r4, Operand(-1));
7305  __ b(ne, &neither_is_nan);
7306  __ mov(r4,
7307         Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
7308         SetCC);
7309  __ b(ne, &one_is_nan);
7310  __ cmp(rhs_mantissa, Operand(0));
7311  __ b(eq, &neither_is_nan);
7312
7313  __ bind(&one_is_nan);
7314  // NaN comparisons always fail.
7315  // Load whatever we need in r0 to make the comparison fail.
7316  if (cc == lt || cc == le) {
7317    __ mov(r0, Operand(GREATER));
7318  } else {
7319    __ mov(r0, Operand(LESS));
7320  }
7321  __ Ret();
7322
7323  __ bind(&neither_is_nan);
7324}
7325
7326
7327// See comment at call site.
7328static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
7329  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
7330  Register rhs_exponent = exp_first ? r0 : r1;
7331  Register lhs_exponent = exp_first ? r2 : r3;
7332  Register rhs_mantissa = exp_first ? r1 : r0;
7333  Register lhs_mantissa = exp_first ? r3 : r2;
7334
7335  // r0, r1, r2, r3 have the two doubles.  Neither is a NaN.
7336  if (cc == eq) {
7337    // Doubles are not equal unless they have the same bit pattern.
7338    // Exception: 0 and -0.
7339    __ cmp(rhs_mantissa, Operand(lhs_mantissa));
7340    __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
7341    // Return non-zero if the numbers are unequal.
7342    __ Ret(ne);
7343
7344    __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
7345    // If exponents are equal then return 0.
7346    __ Ret(eq);
7347
7348    // Exponents are unequal.  The only way we can return that the numbers
7349    // are equal is if one is -0 and the other is 0.  We already dealt
7350    // with the case where both are -0 or both are 0.
7351    // We start by seeing if the mantissas (that are equal) or the bottom
7352    // 31 bits of the rhs exponent are non-zero.  If so we return not
7353    // equal.
7354    __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
7355    __ mov(r0, Operand(r4), LeaveCC, ne);
7356    __ Ret(ne);
    // Now they are equal if and only if the rhs exponent is zero in its
    // low 31 bits.
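    // (Both mantissas are zero here and the lhs exponent word is zero except
    // possibly for its sign bit: +0 is all-zero bits while -0 has only bit 31
    // set, and that bit is discarded by the shift below.)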
7359    __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
7360    __ Ret();
7361  } else {
    // Call a native function to do a comparison between two non-NaNs.
    // The C routine called must not cause GC or other trouble.
7364    __ push(lr);
7365    __ PrepareCallCFunction(4, r5);  // Two doubles count as 4 arguments.
7366    __ CallCFunction(ExternalReference::compare_doubles(), 4);
7367    __ pop(pc);  // Return.
7368  }
7369}
7370
7371
7372// See comment at call site.
7373static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
7374                                           Register lhs,
7375                                           Register rhs) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // If either operand is a JSObject or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_JS_OBJECT_TYPE.
  __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero).
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for symbol-symbol.
  // Ensure that no non-strings have the symbol bit set.
  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
  STATIC_ASSERT(kSymbolTag != 0);
  __ and_(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(ne, &return_not_equal);
7413}
7414
7415
7416// See comment at call site.
7417static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
7418                                       Register lhs,
7419                                       Register rhs,
7420                                       Label* both_loaded_as_doubles,
7421                                       Label* not_heap_numbers,
7422                                       Label* slow) {
7423  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
7424         (lhs.is(r1) && rhs.is(r0)));
7425
7426  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
7427  __ b(ne, not_heap_numbers);
7428  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
7429  __ cmp(r2, r3);
7430  __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.
7431
7432  // Both are heap numbers.  Load them up then jump to the code we have
7433  // for that.
7434  if (CpuFeatures::IsSupported(VFP3)) {
7435    CpuFeatures::Scope scope(VFP3);
7436    __ sub(r7, rhs, Operand(kHeapObjectTag));
7437    __ vldr(d6, r7, HeapNumber::kValueOffset);
7438    __ sub(r7, lhs, Operand(kHeapObjectTag));
7439    __ vldr(d7, r7, HeapNumber::kValueOffset);
7440  } else {
7441    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
7442    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
7443  }
7444  __ jmp(both_loaded_as_doubles);
7445}
7446
7447
7448// Fast negative check for symbol-to-symbol equality.
7449static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
7450                                         Register lhs,
7451                                         Register rhs,
7452                                         Label* possible_strings,
7453                                         Label* not_both_strings) {
7454  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
7455         (lhs.is(r1) && rhs.is(r0)));
7456
7457  // r2 is object type of rhs.
7458  // Ensure that no non-strings have the symbol bit set.
7459  Label object_test;
7460  STATIC_ASSERT(kSymbolTag != 0);
7461  __ tst(r2, Operand(kIsNotStringMask));
7462  __ b(ne, &object_test);
7463  __ tst(r2, Operand(kIsSymbolMask));
7464  __ b(eq, possible_strings);
7465  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
7466  __ b(ge, not_both_strings);
7467  __ tst(r3, Operand(kIsSymbolMask));
7468  __ b(eq, possible_strings);
7469
7470  // Both are symbols.  We already checked they weren't the same pointer
7471  // so they are not equal.
7472  __ mov(r0, Operand(NOT_EQUAL));
7473  __ Ret();
7474
7475  __ bind(&object_test);
7476  __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
7477  __ b(lt, not_both_strings);
7478  __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
7479  __ b(lt, not_both_strings);
7480  // If both objects are undetectable, they are equal. Otherwise, they
7481  // are not equal, since they are different objects and an object is not
7482  // equal to undefined.
7483  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
7484  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
7485  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
7486  __ and_(r0, r2, Operand(r3));
7487  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
7488  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
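  // r0 is now zero (signalling equality) exactly when both maps had the
  // undetectable bit set.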
7489  __ Ret();
7490}
7491
7492
7493void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
7494                                                         Register object,
7495                                                         Register result,
7496                                                         Register scratch1,
7497                                                         Register scratch2,
7498                                                         Register scratch3,
7499                                                         bool object_is_smi,
7500                                                         Label* not_found) {
7501  // Use of registers. Register result is used as a temporary.
7502  Register number_string_cache = result;
7503  Register mask = scratch3;
7504
7505  // Load the number string cache.
7506  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
7507
7508  // Make the hash mask from the length of the number string cache. It
7509  // contains two elements (number and string) for each cache entry.
7510  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
7511  // Divide length by two (length is a smi).
7512  __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
7513  __ sub(mask, mask, Operand(1));  // Make mask.
7514
7515  // Calculate the entry in the number string cache. The hash value in the
7516  // number string cache for smis is just the smi value, and the hash for
7517  // doubles is the xor of the upper and lower words. See
7518  // Heap::GetNumberStringCache.
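  // For instance, for the double 1.5 the IEEE-754 words are 0x3FF80000 and
  // 0x00000000, so the hash before masking is 0x3FF80000 ^ 0x00000000.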
7519  Label is_smi;
7520  Label load_result_from_cache;
7521  if (!object_is_smi) {
7522    __ BranchOnSmi(object, &is_smi);
7523    if (CpuFeatures::IsSupported(VFP3)) {
7524      CpuFeatures::Scope scope(VFP3);
7525      __ CheckMap(object,
7526                  scratch1,
7527                  Heap::kHeapNumberMapRootIndex,
7528                  not_found,
7529                  true);
7530
7531      STATIC_ASSERT(8 == kDoubleSize);
7532      __ add(scratch1,
7533             object,
7534             Operand(HeapNumber::kValueOffset - kHeapObjectTag));
7535      __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
7536      __ eor(scratch1, scratch1, Operand(scratch2));
7537      __ and_(scratch1, scratch1, Operand(mask));
7538
7539      // Calculate address of entry in string cache: each entry consists
7540      // of two pointer sized fields.
7541      __ add(scratch1,
7542             number_string_cache,
7543             Operand(scratch1, LSL, kPointerSizeLog2 + 1));
7544
7545      Register probe = mask;
7546      __ ldr(probe,
7547             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
7548      __ BranchOnSmi(probe, not_found);
7549      __ sub(scratch2, object, Operand(kHeapObjectTag));
7550      __ vldr(d0, scratch2, HeapNumber::kValueOffset);
7551      __ sub(probe, probe, Operand(kHeapObjectTag));
7552      __ vldr(d1, probe, HeapNumber::kValueOffset);
7553      __ vcmp(d0, d1);
7554      __ vmrs(pc);
7555      __ b(ne, not_found);  // The cache did not contain this value.
7556      __ b(&load_result_from_cache);
7557    } else {
7558      __ b(not_found);
7559    }
7560  }
7561
7562  __ bind(&is_smi);
7563  Register scratch = scratch1;
7564  __ and_(scratch, mask, Operand(object, ASR, 1));
7565  // Calculate address of entry in string cache: each entry consists
7566  // of two pointer sized fields.
7567  __ add(scratch,
7568         number_string_cache,
7569         Operand(scratch, LSL, kPointerSizeLog2 + 1));
7570
7571  // Check if the entry is the smi we are looking for.
7572  Register probe = mask;
7573  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
7574  __ cmp(object, probe);
7575  __ b(ne, not_found);
7576
7577  // Get the result from the cache.
7578  __ bind(&load_result_from_cache);
7579  __ ldr(result,
7580         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
7581  __ IncrementCounter(&Counters::number_to_string_native,
7582                      1,
7583                      scratch1,
7584                      scratch2);
7585}
7586
7587
7588void NumberToStringStub::Generate(MacroAssembler* masm) {
7589  Label runtime;
7590
7591  __ ldr(r1, MemOperand(sp, 0));
7592
7593  // Generate code to look up the number in the number string cache.
7594  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
7595  __ add(sp, sp, Operand(1 * kPointerSize));
7596  __ Ret();
7597
7598  __ bind(&runtime);
7599  // Handle number to string in the runtime system if not found in the cache.
7600  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
7601}
7602
7603
7604void RecordWriteStub::Generate(MacroAssembler* masm) {
7605  __ add(offset_, object_, Operand(offset_));
7606  __ RecordWriteHelper(object_, offset_, scratch_);
7607  __ Ret();
7608}
7609
7610
7611// On entry lhs_ and rhs_ are the values to be compared.
7612// On exit r0 is 0, positive or negative to indicate the result of
7613// the comparison.
7614void CompareStub::Generate(MacroAssembler* masm) {
7615  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
7616         (lhs_.is(r1) && rhs_.is(r0)));
7617
7618  Label slow;  // Call builtin.
7619  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
7620
7621  // NOTICE! This code is only reached after a smi-fast-case check, so
7622  // it is certain that at least one operand isn't a smi.
7623
7624  // Handle the case where the objects are identical.  Either returns the answer
7625  // or goes to slow.  Only falls through if the objects were not identical.
7626  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
7627
7628  // If either is a Smi (we know that not both are), then they can only
7629  // be strictly equal if the other is a HeapNumber.
7630  STATIC_ASSERT(kSmiTag == 0);
7631  ASSERT_EQ(0, Smi::FromInt(0));
7632  __ and_(r2, lhs_, Operand(rhs_));
7633  __ tst(r2, Operand(kSmiTagMask));
7634  __ b(ne, &not_smis);
7635  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
7636  // 1) Return the answer.
7637  // 2) Go to slow.
7638  // 3) Fall through to both_loaded_as_doubles.
7639  // 4) Jump to lhs_not_nan.
7640  // In cases 3 and 4 we have found out we were dealing with a number-number
7641  // comparison.  If VFP3 is supported the double values of the numbers have
7642  // been loaded into d7 and d6.  Otherwise, the double values have been loaded
7643  // into r0, r1, r2, and r3.
7644  EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
7645
7646  __ bind(&both_loaded_as_doubles);
7647  // The arguments have been converted to doubles and stored in d6 and d7, if
7648  // VFP3 is supported, or in r0, r1, r2, and r3.
7649  if (CpuFeatures::IsSupported(VFP3)) {
7650    __ bind(&lhs_not_nan);
7651    CpuFeatures::Scope scope(VFP3);
7652    Label no_nan;
7653    // ARMv7 VFP3 instructions to implement double precision comparison.
7654    __ vcmp(d7, d6);
7655    __ vmrs(pc);  // Move vector status bits to normal status bits.
7656    Label nan;
7657    __ b(vs, &nan);
7658    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
7659    __ mov(r0, Operand(LESS), LeaveCC, lt);
7660    __ mov(r0, Operand(GREATER), LeaveCC, gt);
7661    __ Ret();
7662
7663    __ bind(&nan);
7664    // If one of the sides was a NaN then the v flag is set.  Load r0 with
7665    // whatever it takes to make the comparison fail, since comparisons with NaN
7666    // always fail.
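    // For example, with cc_ == lt we return GREATER: GREATER satisfies
    // neither lt nor le, so NaN < x correctly evaluates to false.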
7667    if (cc_ == lt || cc_ == le) {
7668      __ mov(r0, Operand(GREATER));
7669    } else {
7670      __ mov(r0, Operand(LESS));
7671    }
7672    __ Ret();
7673  } else {
7674    // Checks for NaN in the doubles we have loaded.  Can return the answer or
7675    // fall through if neither is a NaN.  Also binds lhs_not_nan.
7676    EmitNanCheck(masm, &lhs_not_nan, cc_);
7677    // Compares two doubles in r0, r1, r2, r3 that are not NaNs.  Returns the
7678    // answer.  Never falls through.
7679    EmitTwoNonNanDoubleComparison(masm, cc_);
7680  }
7681
7682  __ bind(&not_smis);
7683  // At this point we know we are dealing with two different objects,
7684  // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
7685  if (strict_) {
7686    // This returns non-equal for some object types, or falls through if it
7687    // was not lucky.
7688    EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
7689  }
7690
7691  Label check_for_symbols;
7692  Label flat_string_check;
7693  // Check for heap-number-heap-number comparison.  Can jump to slow case,
7694  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
7695  // that case.  If the inputs are not doubles then jumps to check_for_symbols.
7696  // In this case r2 will contain the type of rhs_.  Never falls through.
7697  EmitCheckForTwoHeapNumbers(masm,
7698                             lhs_,
7699                             rhs_,
7700                             &both_loaded_as_doubles,
7701                             &check_for_symbols,
7702                             &flat_string_check);
7703
7704  __ bind(&check_for_symbols);
7705  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
7706  // symbols.
7707  if (cc_ == eq && !strict_) {
7708    // Returns an answer for two symbols or two detectable objects.
7709    // Otherwise jumps to string case or not both strings case.
7710    // Assumes that r2 is the type of rhs_ on entry.
7711    EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
7712  }
7713
7714  // Check for both being sequential ASCII strings, and inline if that is the
7715  // case.
7716  __ bind(&flat_string_check);
7717
7718  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
7719
7720  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
7721  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
7722                                                     lhs_,
7723                                                     rhs_,
7724                                                     r2,
7725                                                     r3,
7726                                                     r4,
7727                                                     r5);
7728  // Never falls through to here.
7729
7730  __ bind(&slow);
7731
7732  __ Push(lhs_, rhs_);
7733  // Figure out which native to call and setup the arguments.
7734  Builtins::JavaScript native;
7735  if (cc_ == eq) {
7736    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
7737  } else {
7738    native = Builtins::COMPARE;
7739    int ncr;  // NaN compare result
7740    if (cc_ == lt || cc_ == le) {
7741      ncr = GREATER;
7742    } else {
7743      ASSERT(cc_ == gt || cc_ == ge);  // remaining cases
7744      ncr = LESS;
7745    }
7746    __ mov(r0, Operand(Smi::FromInt(ncr)));
7747    __ push(r0);
7748  }
7749
7750  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
7751  // tagged as a small integer.
7752  __ InvokeBuiltin(native, JUMP_JS);
7753}
7754
7755
7756// We fall into this code if the operands were Smis, but the result was
7757// not (e.g. overflow).  We branch into this code (to the not_smi label) if
7758// the operands were not both Smis.  The operands are in r0 and r1.  In order
7759// to call the C-implemented binary fp operation routines we need to end up
7760// with the double precision floating point operands in r0 and r1 (for the
7761// value in r1) and r2 and r3 (for the value in r0).
7762void GenericBinaryOpStub::HandleBinaryOpSlowCases(
7763    MacroAssembler* masm,
7764    Label* not_smi,
7765    Register lhs,
7766    Register rhs,
7767    const Builtins::JavaScript& builtin) {
7768  Label slow, slow_reverse, do_the_call;
7769  bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
7770
7771  ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
7772  Register heap_number_map = r6;
7773
7774  if (ShouldGenerateSmiCode()) {
7775    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7776
7777    // Smi-smi case (overflow).
7778    // Since both are Smis there is no heap number to overwrite, so allocate.
7779    // The new heap number is in r5.  r3 and r7 are scratch.
7780    __ AllocateHeapNumber(
7781        r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
7782
7783    // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
7784    // using registers d7 and d6 for the double values.
7785    if (CpuFeatures::IsSupported(VFP3)) {
7786      CpuFeatures::Scope scope(VFP3);
7787      __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
7788      __ vmov(s15, r7);
7789      __ vcvt_f64_s32(d7, s15);
7790      __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
7791      __ vmov(s13, r7);
7792      __ vcvt_f64_s32(d6, s13);
7793      if (!use_fp_registers) {
7794        __ vmov(r2, r3, d7);
7795        __ vmov(r0, r1, d6);
7796      }
7797    } else {
7798      // Write Smi from rhs to r3 and r2 in double format.  r9 is scratch.
7799      __ mov(r7, Operand(rhs));
7800      ConvertToDoubleStub stub1(r3, r2, r7, r9);
7801      __ push(lr);
7802      __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
7803      // Write Smi from lhs to r1 and r0 in double format.  r9 is scratch.
7804      __ mov(r7, Operand(lhs));
7805      ConvertToDoubleStub stub2(r1, r0, r7, r9);
7806      __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
7807      __ pop(lr);
7808    }
7809    __ jmp(&do_the_call);  // Tail call.  No return.
7810  }
7811
7812  // We branch here if at least one of r0 and r1 is not a Smi.
7813  __ bind(not_smi);
7814  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7815
7816  // After this point we have the left hand side in r1 and the right hand side
7817  // in r0.
7818  if (lhs.is(r0)) {
7819    __ Swap(r0, r1, ip);
7820  }
7821
7822  // The type transition also calculates the answer.
7823  bool generate_code_to_calculate_answer = true;
7824
7825  if (ShouldGenerateFPCode()) {
7826    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
7827      switch (op_) {
7828        case Token::ADD:
7829        case Token::SUB:
7830        case Token::MUL:
7831        case Token::DIV:
7832          GenerateTypeTransition(masm);  // Tail call.
7833          generate_code_to_calculate_answer = false;
7834          break;
7835
7836        default:
7837          break;
7838      }
7839    }
7840
7841    if (generate_code_to_calculate_answer) {
7842      Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
7843      if (mode_ == NO_OVERWRITE) {
7844        // In the case where there is no chance of an overwritable float we may
7845        // as well do the allocation immediately while r0 and r1 are untouched.
7846        __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
7847      }
7848
7849      // Move r0 to a double in r2-r3.
7850      __ tst(r0, Operand(kSmiTagMask));
7851      __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
7852      __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
7853      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7854      __ cmp(r4, heap_number_map);
7855      __ b(ne, &slow);
7856      if (mode_ == OVERWRITE_RIGHT) {
7857        __ mov(r5, Operand(r0));  // Overwrite this heap number.
7858      }
7859      if (use_fp_registers) {
7860        CpuFeatures::Scope scope(VFP3);
7861        // Load the double from tagged HeapNumber r0 to d7.
7862        __ sub(r7, r0, Operand(kHeapObjectTag));
7863        __ vldr(d7, r7, HeapNumber::kValueOffset);
7864      } else {
7865        // Calling convention says that second double is in r2 and r3.
7866        __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
7867      }
7868      __ jmp(&finished_loading_r0);
7869      __ bind(&r0_is_smi);
7870      if (mode_ == OVERWRITE_RIGHT) {
7871        // We can't overwrite a Smi so get address of new heap number into r5.
7872        __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
7873      }
7874
7875      if (CpuFeatures::IsSupported(VFP3)) {
7876        CpuFeatures::Scope scope(VFP3);
7877        // Convert smi in r0 to double in d7.
7878        __ mov(r7, Operand(r0, ASR, kSmiTagSize));
7879        __ vmov(s15, r7);
7880        __ vcvt_f64_s32(d7, s15);
7881        if (!use_fp_registers) {
7882          __ vmov(r2, r3, d7);
7883        }
7884      } else {
7885        // Write Smi from r0 to r3 and r2 in double format.
7886        __ mov(r7, Operand(r0));
7887        ConvertToDoubleStub stub3(r3, r2, r7, r4);
7888        __ push(lr);
7889        __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
7890        __ pop(lr);
7891      }
7892
7893      // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
7894      // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
7895      Label r1_is_not_smi;
7896      if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
7897        __ tst(r1, Operand(kSmiTagMask));
7898        __ b(ne, &r1_is_not_smi);
7899        GenerateTypeTransition(masm);  // Tail call.
7900      }
7901
7902      __ bind(&finished_loading_r0);
7903
7904      // Move r1 to a double in r0-r1.
7905      __ tst(r1, Operand(kSmiTagMask));
7906      __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
7907      __ bind(&r1_is_not_smi);
7908      __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
7909      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7910      __ cmp(r4, heap_number_map);
7911      __ b(ne, &slow);
7912      if (mode_ == OVERWRITE_LEFT) {
7913        __ mov(r5, Operand(r1));  // Overwrite this heap number.
7914      }
7915      if (use_fp_registers) {
7916        CpuFeatures::Scope scope(VFP3);
7917        // Load the double from tagged HeapNumber r1 to d6.
7918        __ sub(r7, r1, Operand(kHeapObjectTag));
7919        __ vldr(d6, r7, HeapNumber::kValueOffset);
7920      } else {
7921        // Calling convention says that first double is in r0 and r1.
7922        __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
7923      }
7924      __ jmp(&finished_loading_r1);
7925      __ bind(&r1_is_smi);
7926      if (mode_ == OVERWRITE_LEFT) {
7927        // We can't overwrite a Smi so get address of new heap number into r5.
7928        __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
7929      }
7930
7931      if (CpuFeatures::IsSupported(VFP3)) {
7932        CpuFeatures::Scope scope(VFP3);
7933        // Convert smi in r1 to double in d6.
7934        __ mov(r7, Operand(r1, ASR, kSmiTagSize));
7935        __ vmov(s13, r7);
7936        __ vcvt_f64_s32(d6, s13);
7937        if (!use_fp_registers) {
7938          __ vmov(r0, r1, d6);
7939        }
7940      } else {
7941        // Write Smi from r1 to r1 and r0 in double format.
7942        __ mov(r7, Operand(r1));
7943        ConvertToDoubleStub stub4(r1, r0, r7, r9);
7944        __ push(lr);
7945        __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
7946        __ pop(lr);
7947      }
7948
7949      __ bind(&finished_loading_r1);
7950    }
7951
7952    if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
7953      __ bind(&do_the_call);
7954      // If we are inlining the operation using VFP3 instructions for
7955      // add, subtract, multiply, or divide, the arguments are in d6 and d7.
7956      if (use_fp_registers) {
7957        CpuFeatures::Scope scope(VFP3);
7958        // ARMv7 VFP3 instructions to implement
7959        // double precision, add, subtract, multiply, divide.
7960
7961        if (Token::MUL == op_) {
7962          __ vmul(d5, d6, d7);
7963        } else if (Token::DIV == op_) {
7964          __ vdiv(d5, d6, d7);
7965        } else if (Token::ADD == op_) {
7966          __ vadd(d5, d6, d7);
7967        } else if (Token::SUB == op_) {
7968          __ vsub(d5, d6, d7);
7969        } else {
7970          UNREACHABLE();
7971        }
7972        __ sub(r0, r5, Operand(kHeapObjectTag));
7973        __ vstr(d5, r0, HeapNumber::kValueOffset);
7974        __ add(r0, r0, Operand(kHeapObjectTag));
7975        __ mov(pc, lr);
7976      } else {
7977        // If we did not inline the operation, then the arguments are in:
7978        // r0: Left value (least significant part of mantissa).
7979        // r1: Left value (sign, exponent, top of mantissa).
7980        // r2: Right value (least significant part of mantissa).
7981        // r3: Right value (sign, exponent, top of mantissa).
7982        // r5: Address of heap number for result.
7983
7984        __ push(lr);   // For later.
7985        __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments.
7986        // Call C routine that may not cause GC or other trouble. r5 is callee
7987        // save.
7988        __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
7989        // Store answer in the overwritable heap number.
7990#if !defined(USE_ARM_EABI)
7991        // Double returned in fp coprocessor register 0 and 1, encoded as
7992        // register cr8.  Offsets must be divisible by 4 for coprocessor so we
7993        // need to subtract the tag from r5.
7994        __ sub(r4, r5, Operand(kHeapObjectTag));
7995        __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
7996#else
7997        // Double returned in registers 0 and 1.
7998        __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
7999#endif
8000        __ mov(r0, Operand(r5));
8001        // And we are done.
8002        __ pop(pc);
8003      }
8004    }
8005  }
8006
8007  if (!generate_code_to_calculate_answer &&
8008      !slow_reverse.is_linked() &&
8009      !slow.is_linked()) {
8010    return;
8011  }
8012
8013  if (lhs.is(r0)) {
8014    __ b(&slow);
8015    __ bind(&slow_reverse);
8016    __ Swap(r0, r1, ip);
8017  }
8018
8019  heap_number_map = no_reg;  // Don't use this any more from here on.
8020
8021  // We jump to here if something goes wrong (one param is not a number of any
8022  // sort or new-space allocation fails).
8023  __ bind(&slow);
8024
8025  // Push arguments to the stack
8026  __ Push(r1, r0);
8027
8028  if (Token::ADD == op_) {
8029    // Test for string arguments before calling runtime.
8030    // r1 : first argument
8031    // r0 : second argument
8032    // sp[0] : second argument
8033    // sp[4] : first argument
8034
8035    Label not_strings, not_string1, string1, string1_smi2;
8036    __ tst(r1, Operand(kSmiTagMask));
8037    __ b(eq, &not_string1);
8038    __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
8039    __ b(ge, &not_string1);
8040
8041    // First argument is a string, test second.
8042    __ tst(r0, Operand(kSmiTagMask));
8043    __ b(eq, &string1_smi2);
8044    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
8045    __ b(ge, &string1);
8046
8047    // First and second argument are strings.
8048    StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
8049    __ TailCallStub(&string_add_stub);
8050
8051    __ bind(&string1_smi2);
8052    // First argument is a string, second is a smi.  Try to look up the number
8053    // string for the smi in the number string cache.
8054    NumberToStringStub::GenerateLookupNumberStringCache(
8055        masm, r0, r2, r4, r5, r6, true, &string1);
8056
8057    // Replace second argument on stack and tailcall string add stub to make
8058    // the result.
8059    __ str(r2, MemOperand(sp, 0));
8060    __ TailCallStub(&string_add_stub);
8061
8062    // Only first argument is a string.
8063    __ bind(&string1);
8064    __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
8065
8066    // First argument was not a string, test second.
8067    __ bind(&not_string1);
8068    __ tst(r0, Operand(kSmiTagMask));
8069    __ b(eq, &not_strings);
8070    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
8071    __ b(ge, &not_strings);
8072
8073    // Only second argument is a string.
8074    __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
8075
8076    __ bind(&not_strings);
8077  }
8078
8079  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
8080}
8081
8082
8083// Tries to get a signed int32 out of a double precision floating point heap
8084// number.  Rounds towards 0.  Fastest for doubles that are in the ranges
8085// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff.  This corresponds
8086// almost to the range of signed int32 values that are not Smis.  Jumps to the
8087// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
8088// (excluding the endpoints).
8089static void GetInt32(MacroAssembler* masm,
8090                     Register source,
8091                     Register dest,
8092                     Register scratch,
8093                     Register scratch2,
8094                     Label* slow) {
8095  Label right_exponent, done;
8096  // Get exponent word.
8097  __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
8098  // Get exponent alone in scratch2.
8099  __ Ubfx(scratch2,
8100          scratch,
8101          HeapNumber::kExponentShift,
8102          HeapNumber::kExponentBits);
8103  // Load dest with zero.  We use this either for the final shift or
8104  // for the answer.
8105  __ mov(dest, Operand(0));
8106  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
8107  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).  This is
8108  // the exponent that we are fastest at and also the highest exponent we can
8109  // handle here.
8110  const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
8111  // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
8112  // split it up to avoid a constant pool entry.  You can't do that in general
8113  // for cmp because of the overflow flag, but we know the exponent is in the
8114  // range 0-2047 so there is no overflow.
8115  int fudge_factor = 0x400;
8116  __ sub(scratch2, scratch2, Operand(fudge_factor));
8117  __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
8118  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
8119  __ b(eq, &right_exponent);
8120  // If the exponent is higher than that then go to slow case.  This catches
8121  // numbers that don't fit in a signed int32, infinities and NaNs.
8122  __ b(gt, slow);
8123
8124  // We know the exponent is smaller than 30 (biased).  If it is less than
8125  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
8126  // it rounds to zero.
8127  const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
8128  __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
8129  // Dest already has a Smi zero.
8130  __ b(lt, &done);
8131  if (!CpuFeatures::IsSupported(VFP3)) {
8132    // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
8133    // get how much to shift down.
8134    __ rsb(dest, scratch2, Operand(30));
8135  }
8136  __ bind(&right_exponent);
8137  if (CpuFeatures::IsSupported(VFP3)) {
8138    CpuFeatures::Scope scope(VFP3);
8139    // ARMv7 VFP3 instructions implementing double precision to integer
8140    // conversion using round to zero.
8141    __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
8142    __ vmov(d7, scratch2, scratch);
8143    __ vcvt_s32_f64(s15, d7);
8144    __ vmov(dest, s15);
8145  } else {
8146    // Get the top bits of the mantissa.
8147    __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
8148    // Put back the implicit 1.
8149    __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
8150    // Shift up the mantissa bits to take up the space the exponent used to
8151    // take. We just orred in the implicit bit so that took care of one and
8152    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
8153    // distance.
8154    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
8155    __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
8156    // Put sign in zero flag.
8157    __ tst(scratch, Operand(HeapNumber::kSignMask));
8158    // Get the second half of the double. For some exponents we don't
8159    // actually need this because the bits get shifted out again, but
8160    // it's probably slower to test than just to do it.
8161    __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
8162    // Shift down 22 bits to get the last 10 bits.
8163    __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
8164    // Move down according to the exponent.
8165    __ mov(dest, Operand(scratch, LSR, dest));
8166    // Fix sign if sign bit was set.
8167    __ rsb(dest, dest, Operand(0), LeaveCC, ne);
8168  }
8169  __ bind(&done);
8170}
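

// A standalone sketch of the non-VFP truncation above, written in plain C++
// for illustration only (it is not used by the generated code).  It assumes
// the usual fixed-width integer types and memcpy are available, and that the
// input is positive and in [1, 2^31) so the exponent checks above would pass.
static inline int32_t TruncatePositiveDoubleSketch(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));  // Reinterpret the IEEE-754 bits.
  // Unbias the 11-bit exponent; for inputs in [1, 2^31) it is in 0..30.
  int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;
  // Restore the implicit leading 1 of the 52-bit mantissa.
  uint64_t mantissa =
      (bits & ((static_cast<uint64_t>(1) << 52) - 1)) |
      (static_cast<uint64_t>(1) << 52);
  // Bit 52 of the mantissa has weight 2^exponent; shift it down into place.
  return static_cast<int32_t>(mantissa >> (52 - exponent));
}
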
8171
8172// For bitwise ops where the inputs are not both Smis we here try to determine
8173// whether both inputs are either Smis or at least heap numbers that can be
8174// represented by a 32 bit signed value.  We truncate towards zero as required
8175// by the ES spec.  If this is the case we do the bitwise op and see if the
8176// result is a Smi.  If so, great, otherwise we try to find a heap number to
8177// write the answer into (either by allocating or by overwriting).
8178// On entry the operands are in lhs and rhs.  On exit the answer is in r0.
8179void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
8180                                                Register lhs,
8181                                                Register rhs) {
8182  Label slow, result_not_a_smi;
8183  Label rhs_is_smi, lhs_is_smi;
8184  Label done_checking_rhs, done_checking_lhs;
8185
8186  Register heap_number_map = r6;
8187  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
8188
8189  __ tst(lhs, Operand(kSmiTagMask));
8190  __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
8191  __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
8192  __ cmp(r4, heap_number_map);
8193  __ b(ne, &slow);
8194  GetInt32(masm, lhs, r3, r5, r4, &slow);
8195  __ jmp(&done_checking_lhs);
8196  __ bind(&lhs_is_smi);
8197  __ mov(r3, Operand(lhs, ASR, 1));
8198  __ bind(&done_checking_lhs);
8199
8200  __ tst(rhs, Operand(kSmiTagMask));
8201  __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
8202  __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
8203  __ cmp(r4, heap_number_map);
8204  __ b(ne, &slow);
8205  GetInt32(masm, rhs, r2, r5, r4, &slow);
8206  __ jmp(&done_checking_rhs);
8207  __ bind(&rhs_is_smi);
8208  __ mov(r2, Operand(rhs, ASR, 1));
8209  __ bind(&done_checking_rhs);
8210
8211  ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
8212
8213  // r0 and r1: Original operands (Smi or heap numbers).
8214  // r2 and r3: Signed int32 operands.
8215  switch (op_) {
8216    case Token::BIT_OR:  __ orr(r2, r2, Operand(r3)); break;
8217    case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
8218    case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
8219    case Token::SAR:
8220      // Use only the 5 least significant bits of the shift count.
8221      __ and_(r2, r2, Operand(0x1f));
8222      __ mov(r2, Operand(r3, ASR, r2));
8223      break;
8224    case Token::SHR:
8225      // Use only the 5 least significant bits of the shift count.
8226      __ and_(r2, r2, Operand(0x1f));
8227      __ mov(r2, Operand(r3, LSR, r2), SetCC);
8228      // SHR is special because it is required to produce a positive answer.
8229      // The code below for writing into heap numbers isn't capable of writing
8230      // the register as an unsigned int so we go to slow case if we hit this
8231      // case.
8232      if (CpuFeatures::IsSupported(VFP3)) {
8233        __ b(mi, &result_not_a_smi);
8234      } else {
8235        __ b(mi, &slow);
8236      }
8237      break;
8238    case Token::SHL:
8239      // Use only the 5 least significant bits of the shift count.
8240      __ and_(r2, r2, Operand(0x1f));
8241      __ mov(r2, Operand(r3, LSL, r2));
8242      break;
8243    default: UNREACHABLE();
8244  }
8245  // Check that the *signed* result fits in a Smi.
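  // (Adding 0x40000000 sets the N flag exactly when the signed value in r2
  // lies outside [-2^30, 2^30 - 1], the range representable as a Smi.)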
8246  __ add(r3, r2, Operand(0x40000000), SetCC);
8247  __ b(mi, &result_not_a_smi);
8248  __ mov(r0, Operand(r2, LSL, kSmiTagSize));
8249  __ Ret();
8250
8251  Label have_to_allocate, got_a_heap_number;
8252  __ bind(&result_not_a_smi);
8253  switch (mode_) {
8254    case OVERWRITE_RIGHT: {
8255      __ tst(rhs, Operand(kSmiTagMask));
8256      __ b(eq, &have_to_allocate);
8257      __ mov(r5, Operand(rhs));
8258      break;
8259    }
8260    case OVERWRITE_LEFT: {
8261      __ tst(lhs, Operand(kSmiTagMask));
8262      __ b(eq, &have_to_allocate);
8263      __ mov(r5, Operand(lhs));
8264      break;
8265    }
8266    case NO_OVERWRITE: {
8267      // Get a new heap number in r5.  r4 and r7 are scratch.
8268      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
8269    }
8270    default: break;
8271  }
8272  __ bind(&got_a_heap_number);
8273  // r2: Answer as signed int32.
8274  // r5: Heap number to write answer into.
8275
8276  // Nothing can go wrong now, so move the heap number to r0, which is the
8277  // result.
8278  __ mov(r0, Operand(r5));
8279
8280  if (CpuFeatures::IsSupported(VFP3)) {
8281    // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
8282    CpuFeatures::Scope scope(VFP3);
8283    __ vmov(s0, r2);
8284    if (op_ == Token::SHR) {
8285      __ vcvt_f64_u32(d0, s0);
8286    } else {
8287      __ vcvt_f64_s32(d0, s0);
8288    }
8289    __ sub(r3, r0, Operand(kHeapObjectTag));
8290    __ vstr(d0, r3, HeapNumber::kValueOffset);
8291    __ Ret();
8292  } else {
8293    // Tail call that writes the int32 in r2 to the heap number in r0, using
8294    // r3 as scratch.  r0 is preserved and returned.
8295    WriteInt32ToHeapNumberStub stub(r2, r0, r3);
8296    __ TailCallStub(&stub);
8297  }
8298
8299  if (mode_ != NO_OVERWRITE) {
8300    __ bind(&have_to_allocate);
8301    // Get a new heap number in r5.  r4 and r7 are scratch.
8302    __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
8303    __ jmp(&got_a_heap_number);
8304  }
8305
8306  // If all else failed then we go to the runtime system.
8307  __ bind(&slow);
8308  __ Push(lhs, rhs);  // Restore stack.
8309  switch (op_) {
8310    case Token::BIT_OR:
8311      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
8312      break;
8313    case Token::BIT_AND:
8314      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
8315      break;
8316    case Token::BIT_XOR:
8317      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
8318      break;
8319    case Token::SAR:
8320      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
8321      break;
8322    case Token::SHR:
8323      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
8324      break;
8325    case Token::SHL:
8326      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
8327      break;
8328    default:
8329      UNREACHABLE();
8330  }
8331}
8332
8333
8334// Can we multiply by x with at most two shifts and an add?
8335// This answers yes for all integers from 2 to 10.
8336static bool IsEasyToMultiplyBy(int x) {
8337  if (x < 2) return false;                          // Avoid special cases.
8338  if (x > (Smi::kMaxValue + 1) >> 2) return false;  // Almost always overflows.
8339  if (IsPowerOf2(x)) return true;                   // Simple shift.
8340  if (PopCountLessThanEqual2(x)) return true;       // Shift and add and shift.
8341  if (IsPowerOf2(x + 1)) return true;               // Patterns like 11111.
8342  return false;
8343}
8344
8345
8346// Can multiply by anything that IsEasyToMultiplyBy returns true for.
8347// Source and destination may be the same register.  This routine does
8348// not set carry and overflow the way a mul instruction would.
8349static void MultiplyByKnownInt(MacroAssembler* masm,
8350                               Register source,
8351                               Register destination,
8352                               int known_int) {
8353  if (IsPowerOf2(known_int)) {
8354    __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
8355  } else if (PopCountLessThanEqual2(known_int)) {
8356    int first_bit = BitPosition(known_int);
8357    int second_bit = BitPosition(known_int ^ (1 << first_bit));
8358    __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
8359    if (first_bit != 0) {
8360      __ mov(destination, Operand(destination, LSL, first_bit));
8361    }
8362  } else {
8363    ASSERT(IsPowerOf2(known_int + 1));  // Patterns like 1111.
8364    int the_bit = BitPosition(known_int + 1);
8365    __ rsb(destination, source, Operand(source, LSL, the_bit));
8366  }
8367}
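

// Plain C++ illustrations of the three shift/add patterns above (sketches
// only; these helpers are not used by the code generator): a power of 2 (8),
// a constant with two bits set (10), and a power of 2 minus 1 (7).
static inline int MultiplyBy8Sketch(int x) { return x << 3; }
static inline int MultiplyBy10Sketch(int x) { return (x + (x << 2)) << 1; }
static inline int MultiplyBy7Sketch(int x) { return (x << 3) - x; }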
8368
8369
8370// This function (as opposed to MultiplyByKnownInt) takes the known int in a
8371// register for the cases where it doesn't know a good trick, and may deliver
8372// a result that needs shifting.
8373static void MultiplyByKnownInt2(
8374    MacroAssembler* masm,
8375    Register result,
8376    Register source,
8377    Register known_int_register,   // Smi tagged.
8378    int known_int,
8379    int* required_shift) {  // Including Smi tag shift
8380  switch (known_int) {
8381    case 3:
8382      __ add(result, source, Operand(source, LSL, 1));
8383      *required_shift = 1;
8384      break;
8385    case 5:
8386      __ add(result, source, Operand(source, LSL, 2));
8387      *required_shift = 1;
8388      break;
8389    case 6:
8390      __ add(result, source, Operand(source, LSL, 1));
8391      *required_shift = 2;
8392      break;
8393    case 7:
8394      __ rsb(result, source, Operand(source, LSL, 3));
8395      *required_shift = 1;
8396      break;
8397    case 9:
8398      __ add(result, source, Operand(source, LSL, 3));
8399      *required_shift = 1;
8400      break;
8401    case 10:
8402      __ add(result, source, Operand(source, LSL, 2));
8403      *required_shift = 2;
8404      break;
8405    default:
8406      ASSERT(!IsPowerOf2(known_int));  // That would be very inefficient.
8407      __ mul(result, source, known_int_register);
8408      *required_shift = 0;
8409  }
8410}
8411
8412
8413// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
8414// trick.  See http://en.wikipedia.org/wiki/Divisibility_rule
8415// Takes the sum of the digits in base (mask + 1) repeatedly until we have a
8416// number from 0 to mask.  On exit the 'eq' condition flags are set if the
8417// answer is exactly the mask.
8418void IntegerModStub::DigitSum(MacroAssembler* masm,
8419                              Register lhs,
8420                              int mask,
8421                              int shift,
8422                              Label* entry) {
8423  ASSERT(mask > 0);
8424  ASSERT(mask <= 0xff);  // Mask fits in an immediate, so ip is never needed.
8425  Label loop;
8426  __ bind(&loop);
8427  __ and_(ip, lhs, Operand(mask));
8428  __ add(lhs, ip, Operand(lhs, LSR, shift));
8429  __ bind(entry);
8430  __ cmp(lhs, Operand(mask));
8431  __ b(gt, &loop);
8432}
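

// A plain C++ sketch of the loop above for mask == 3 and shift == 2 (not
// used by the stub).  Summing base-4 digits preserves the value mod 3
// because 4 == 1 (mod 3); the caller then maps a result of 3 back to 0.
static inline int SumBase4DigitsSketch(unsigned x) {
  while (x > 3) x = (x & 3) + (x >> 2);  // One base-4 digit per iteration.
  return static_cast<int>(x);  // In 0..3; 3 (== mask) means divisible by 3.
}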
8433
8434
8435void IntegerModStub::DigitSum(MacroAssembler* masm,
8436                              Register lhs,
8437                              Register scratch,
8438                              int mask,
8439                              int shift1,
8440                              int shift2,
8441                              Label* entry) {
8442  ASSERT(mask > 0);
8443  ASSERT(mask <= 0xff);  // Mask fits in an immediate, so ip is never needed.
8444  Label loop;
8445  __ bind(&loop);
8446  __ bic(scratch, lhs, Operand(mask));
8447  __ and_(ip, lhs, Operand(mask));
8448  __ add(lhs, ip, Operand(lhs, LSR, shift1));
8449  __ add(lhs, lhs, Operand(scratch, LSR, shift2));
8450  __ bind(entry);
8451  __ cmp(lhs, Operand(mask));
8452  __ b(gt, &loop);
8453}
8454
8455
8456// Splits the number into two halves (bottom half has shift bits).  The top
8457// half is subtracted from the bottom half.  If the result is negative then
8458// rhs is added.
8459void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
8460                                                Register lhs,
8461                                                int shift,
8462                                                int rhs) {
8463  int mask = (1 << shift) - 1;
8464  __ and_(ip, lhs, Operand(mask));
8465  __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
8466  __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
8467}
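
// Worked example: for rhs == 5 and shift == 2 the incoming digit sum d
// satisfies d == 4 * (d >> 2) + (d & 3), and since 4 == -1 (mod 5) we get
// (d & 3) - (d >> 2) == d (mod 5); adding 5 repairs a negative result.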
8468
8469
8470void IntegerModStub::ModReduce(MacroAssembler* masm,
8471                               Register lhs,
8472                               int max,
8473                               int denominator) {
8474  int limit = denominator;
8475  while (limit * 2 <= max) limit *= 2;
8476  while (limit >= denominator) {
8477    __ cmp(lhs, Operand(limit));
8478    __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
8479    limit >>= 1;
8480  }
8481}
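
// Worked example: ModReduce(masm, lhs, 0x3f, 11) doubles the limit while it
// still fits (11, 22, 44), then conditionally subtracts 44, 22, and 11,
// leaving a value in the range 0..10.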
8482
8483
8484void IntegerModStub::ModAnswer(MacroAssembler* masm,
8485                               Register result,
8486                               Register shift_distance,
8487                               Register mask_bits,
8488                               Register sum_of_digits) {
8489  __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
8490  __ Ret();
8491}
8492
8493
8494// See comment for class.
8495void IntegerModStub::Generate(MacroAssembler* masm) {
8496  __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
8497  __ bic(odd_number_, odd_number_, Operand(1));
8498  __ mov(odd_number_, Operand(odd_number_, LSL, 1));
8499  // We now have (odd_number_ - 1) * 2 in the register.
8500  // Build a switch out of branches instead of data because it avoids
8501  // having to teach the assembler about intra-code-object pointers
8502  // that are not in relative branch instructions.
8503  Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
8504  Label mod21, mod23, mod25;
8505  { Assembler::BlockConstPoolScope block_const_pool(masm);
8506    __ add(pc, pc, Operand(odd_number_));
8507    // When you read pc it is always 8 ahead, but when you write it you always
8508    // write the actual value.  So we put in two nops to take up the slack.
8509    __ nop();
8510    __ nop();
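    // Worked example: for odd number 3 the register holds (3 - 1) * 2 == 4.
    // The add reads pc as its own address plus 8, so the new pc is the
    // instruction 12 bytes after the add, i.e. b(&mod3) just below.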
8511    __ b(&mod3);
8512    __ b(&mod5);
8513    __ b(&mod7);
8514    __ b(&mod9);
8515    __ b(&mod11);
8516    __ b(&mod13);
8517    __ b(&mod15);
8518    __ b(&mod17);
8519    __ b(&mod19);
8520    __ b(&mod21);
8521    __ b(&mod23);
8522    __ b(&mod25);
8523  }
8524
8525  // For each denominator we find a multiple that is almost only ones
8526  // when expressed in binary.  Then we do the sum-of-digits trick for
8527  // that number.  If the multiple is not 1 then we have to do a little
8528// more work afterwards to get the answer into the 0 to denominator-1
8529  // range.
8530  DigitSum(masm, lhs_, 3, 2, &mod3);  // 3 = b11.
8531  __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
8532  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8533
8534  DigitSum(masm, lhs_, 0xf, 4, &mod5);  // 5 * 3 = b1111.
8535  ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
8536  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8537
8538  DigitSum(masm, lhs_, 7, 3, &mod7);  // 7 = b111.
8539  __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
8540  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8541
8542  DigitSum(masm, lhs_, 0x3f, 6, &mod9);  // 7 * 9 = b111111.
8543  ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
8544  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8545
8546  DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11);  // 5 * 11 = b110111.
8547  ModReduce(masm, lhs_, 0x3f, 11);
8548  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8549
8550  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13);  // 19 * 13 = b11110111.
8551  ModReduce(masm, lhs_, 0xff, 13);
8552  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8553
8554  DigitSum(masm, lhs_, 0xf, 4, &mod15);  // 15 = b1111.
8555  __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
8556  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8557
8558  DigitSum(masm, lhs_, 0xff, 8, &mod17);  // 15 * 17 = b11111111.
8559  ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
8560  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8561
8562  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19);  // 13 * 19 = b11110111.
8563  ModReduce(masm, lhs_, 0xff, 19);
8564  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8565
8566  DigitSum(masm, lhs_, 0x3f, 6, &mod21);  // 3 * 21 = b111111.
8567  ModReduce(masm, lhs_, 0x3f, 21);
8568  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8569
8570  DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23);  // 11 * 23 = b11111101.
8571  ModReduce(masm, lhs_, 0xff, 23);
8572  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8573
8574  DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25);  // 5 * 25 = b1111101.
8575  ModReduce(masm, lhs_, 0x7f, 25);
8576  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8577}
8578
8579
8580const char* GenericBinaryOpStub::GetName() {
8581  if (name_ != NULL) return name_;
8582  const int len = 100;
8583  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
8584  if (name_ == NULL) return "OOM";
8585  const char* op_name = Token::Name(op_);
8586  const char* overwrite_name;
8587  switch (mode_) {
8588    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
8589    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
8590    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
8591    default: overwrite_name = "UnknownOverwrite"; break;
8592  }
8593
8594  OS::SNPrintF(Vector<char>(name_, len),
8595               "GenericBinaryOpStub_%s_%s%s_%s",
8596               op_name,
8597               overwrite_name,
8598               specialized_on_rhs_ ? "_ConstantRhs" : "",
8599               BinaryOpIC::GetName(runtime_operands_type_));
8600  return name_;
8601}
8602
8603
8604
8605void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
8606  // lhs_ : x
8607  // rhs_ : y
8608  // r0   : result
8609
8610  Register result = r0;
8611  Register lhs = lhs_;
8612  Register rhs = rhs_;
8613
8614  // This code can't cope with other register allocations yet.
8615  ASSERT(result.is(r0) &&
8616         ((lhs.is(r0) && rhs.is(r1)) ||
8617          (lhs.is(r1) && rhs.is(r0))));
8618
8619  Register smi_test_reg = VirtualFrame::scratch0();
8620  Register scratch = VirtualFrame::scratch1();
8621
8622  // All ops need to know whether we are dealing with two Smis.  Set up
8623  // smi_test_reg to tell us that.
8624  if (ShouldGenerateSmiCode()) {
8625    __ orr(smi_test_reg, lhs, Operand(rhs));
8626  }
8627
8628  switch (op_) {
8629    case Token::ADD: {
8630      Label not_smi;
8631      // Fast path.
8632      if (ShouldGenerateSmiCode()) {
8633        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
8634        __ tst(smi_test_reg, Operand(kSmiTagMask));
8635        __ b(ne, &not_smi);
8636        __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
8637        // Return if no overflow.
8638        __ Ret(vc);
8639        __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
8640      }
8641      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
8642      break;
8643    }
8644
8645    case Token::SUB: {
8646      Label not_smi;
8647      // Fast path.
8648      if (ShouldGenerateSmiCode()) {
8649        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
8650        __ tst(smi_test_reg, Operand(kSmiTagMask));
8651        __ b(ne, &not_smi);
8652        if (lhs.is(r1)) {
8653          __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
8654          // Return if no overflow.
8655          __ Ret(vc);
8656          __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
8657        } else {
8658          __ sub(r0, r0, Operand(r1), SetCC);  // Subtract y optimistically.
8659          // Return if no overflow.
8660          __ Ret(vc);
8661          __ add(r0, r0, Operand(r1));  // Revert optimistic subtract.
8662        }
8663      }
8664      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
8665      break;
8666    }
8667
8668    case Token::MUL: {
8669      Label not_smi, slow;
8670      if (ShouldGenerateSmiCode()) {
8671        STATIC_ASSERT(kSmiTag == 0);  // adjust code below
8672        __ tst(smi_test_reg, Operand(kSmiTagMask));
8673        Register scratch2 = smi_test_reg;
8674        smi_test_reg = no_reg;
8675        __ b(ne, &not_smi);
8676        // Remove tag from one operand (but keep sign), so that result is Smi.
8677        __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
8678        // Do multiplication
8679        // scratch = lower 32 bits of ip * lhs.
8680        __ smull(scratch, scratch2, lhs, ip);
8681        // Go slow on overflows (overflow bit is not set).
8682        __ mov(ip, Operand(scratch, ASR, 31));
8683        // No overflow if higher 33 bits are identical.
8684        __ cmp(ip, Operand(scratch2));
8685        __ b(ne, &slow);
8686        // Go slow on zero result to handle -0.
8687        __ tst(scratch, Operand(scratch));
8688        __ mov(result, Operand(scratch), LeaveCC, ne);
8689        __ Ret(ne);
8690        // We need -0 if we were multiplying a negative number with 0 to get 0.
8691        // We know one of them was zero.
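        // Since one operand is zero, lhs + rhs equals the other operand; if
        // that sum is non-negative (pl) the correct result is +0, otherwise
        // the result must be -0, which is not a Smi, so fall through.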
8692        __ add(scratch2, rhs, Operand(lhs), SetCC);
8693        __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
8694        __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
8695        // Slow case.  We fall through here if we multiplied a negative number
8696        // with 0, because that would mean we should produce -0.
8697        __ bind(&slow);
8698      }
8699      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
8700      break;
8701    }
8702
8703    case Token::DIV:
8704    case Token::MOD: {
8705      Label not_smi;
8706      if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
8707        Label lhs_is_unsuitable;
8708        __ BranchOnNotSmi(lhs, &not_smi);
8709        if (IsPowerOf2(constant_rhs_)) {
8710          if (op_ == Token::MOD) {
8711            __ and_(rhs,
8712                    lhs,
8713                    Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
8714                    SetCC);
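            // Worked example: for constant_rhs_ == 4 the mask above is
            // 0x80000007, which keeps the smi-tagged remainder mod 4 plus
            // the sign bit of lhs.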
8715            // We now have the answer, but if the input was negative we also
8716            // have the sign bit.  Our work is done if the result is
8717            // positive or zero:
8718            if (!rhs.is(r0)) {
8719              __ mov(r0, rhs, LeaveCC, pl);
8720            }
8721            __ Ret(pl);
8722            // A mod of a negative left hand side must return a negative number.
8723            // Unfortunately if the answer is 0 then we must return -0.  And we
8724            // already optimistically trashed rhs so we may need to restore it.
8725            __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
8726            // Next two instructions are conditional on the answer being -0.
8727            __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
8728            __ b(eq, &lhs_is_unsuitable);
8729            // We need to subtract the divisor.  E.g. -3 % 4 == -3.
8730            __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
8731          } else {
8732            ASSERT(op_ == Token::DIV);
8733            __ tst(lhs,
8734                   Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
8735            __ b(ne, &lhs_is_unsuitable);  // Go slow on negative or remainder.
8736            int shift = 0;
8737            int d = constant_rhs_;
8738            while ((d & 1) == 0) {
8739              d >>= 1;
8740              shift++;
8741            }
8742            __ mov(r0, Operand(lhs, LSR, shift));
8743            __ bic(r0, r0, Operand(kSmiTagMask));
8744          }
8745        } else {
8746          // Not a power of 2.
8747          __ tst(lhs, Operand(0x80000000u));
8748          __ b(ne, &lhs_is_unsuitable);
8749          // Find a fixed point reciprocal of the divisor so we can divide by
8750          // multiplying.
8751          double divisor = 1.0 / constant_rhs_;
8752          int shift = 32;
8753          double scale = 4294967296.0;  // 1 << 32.
8754          uint32_t mul;
8755          // Maximise the precision of the fixed point reciprocal.
8756          while (true) {
8757            mul = static_cast<uint32_t>(scale * divisor);
8758            if (mul >= 0x7fffffff) break;
8759            scale *= 2.0;
8760            shift++;
8761          }
8762          mul++;
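          // Worked example: for constant_rhs_ == 10 this loop exits with
          // shift == 35 and mul == 0xCCCCCCCD (== ceil(2^35 / 10)), the
          // classic divide-by-ten reciprocal.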
8763          Register scratch2 = smi_test_reg;
8764          smi_test_reg = no_reg;
8765          __ mov(scratch2, Operand(mul));
8766          __ umull(scratch, scratch2, scratch2, lhs);
8767          __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
8768          // scratch2 is lhs / rhs.  scratch2 is not Smi tagged.
8769          // rhs is still the known rhs.  rhs is Smi tagged.
8770          // lhs is still the unknown lhs.  lhs is Smi tagged.
8771          int required_scratch_shift = 0;  // Including the Smi tag shift of 1.
8772          // scratch = scratch2 * rhs.
8773          MultiplyByKnownInt2(masm,
8774                              scratch,
8775                              scratch2,
8776                              rhs,
8777                              constant_rhs_,
8778                              &required_scratch_shift);
8779          // scratch << required_scratch_shift is now the Smi tagged rhs *
8780          // (lhs / rhs) where / indicates integer division.
8781          if (op_ == Token::DIV) {
8782            __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
8783            __ b(ne, &lhs_is_unsuitable);  // There was a remainder.
8784            __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
8785          } else {
8786            ASSERT(op_ == Token::MOD);
8787            __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
8788          }
8789        }
8790        __ Ret();
8791        __ bind(&lhs_is_unsuitable);
8792      } else if (op_ == Token::MOD &&
8793                 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
8794                 runtime_operands_type_ != BinaryOpIC::STRINGS) {
8795        // Do generate a bit of smi code for modulus even though the default for
8796        // modulus is not to do it, but as the ARM processor has no coprocessor
8797        // support for modulus, checking for smis makes sense.  We can handle
8798        // 1 to 25 times any power of 2.  This covers over half the numbers from
8799        // 1 to 100 including all of the first 25.  (Actually the constants < 10
8800        // are handled above by reciprocal multiplication.  We only get here for
8801        // those cases if the right hand side is not a constant or for cases
8802        // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod
8803        // stub.)
8804        Label slow;
8805        Label not_power_of_2;
8806        ASSERT(!ShouldGenerateSmiCode());
8807        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
8808        // Check for two positive smis.
8809        __ orr(smi_test_reg, lhs, Operand(rhs));
8810        __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
8811        __ b(ne, &slow);
8812        // Check that rhs is a power of two and not zero.
8813        Register mask_bits = r3;
8814        __ sub(scratch, rhs, Operand(1), SetCC);
8815        __ b(mi, &slow);
8816        __ and_(mask_bits, rhs, Operand(scratch), SetCC);
8817        __ b(ne, &not_power_of_2);
8818        // Calculate power of two modulus.
8819        __ and_(result, lhs, Operand(scratch));
8820        __ Ret();
8821
8822        __ bind(&not_power_of_2);
8823        __ eor(scratch, scratch, Operand(mask_bits));
8824        // At least two bits are set in the modulus.  The high one(s) are in
8825        // mask_bits and the low one is scratch + 1.
8826        __ and_(mask_bits, scratch, Operand(lhs));
8827        Register shift_distance = scratch;
8828        scratch = no_reg;
8829
8830        // The rhs consists of a power of 2 multiplied by some odd number.
8831        // The power-of-2 part we handle by putting the corresponding bits
8832        // from the lhs in the mask_bits register, and the power in the
8833        // shift_distance register.  Shift distance is never 0 due to Smi
8834        // tagging.
8835        __ CountLeadingZeros(r4, shift_distance, shift_distance);
8836        __ rsb(shift_distance, r4, Operand(32));
8837
8838        // Now we need to find out what the odd number is. The last bit is
8839        // always 1.
8840        Register odd_number = r4;
8841        __ mov(odd_number, Operand(rhs, LSR, shift_distance));
8842        __ cmp(odd_number, Operand(25));
8843        __ b(gt, &slow);
8844
8845        IntegerModStub stub(
8846            result, shift_distance, odd_number, mask_bits, lhs, r5);
8847        __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);  // Tail call.
8848
8849        __ bind(&slow);
8850      }
8851      HandleBinaryOpSlowCases(
8852          masm,
8853          &not_smi,
8854          lhs,
8855          rhs,
8856          op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
8857      break;
8858    }
8859
8860    case Token::BIT_OR:
8861    case Token::BIT_AND:
8862    case Token::BIT_XOR:
8863    case Token::SAR:
8864    case Token::SHR:
8865    case Token::SHL: {
8866      Label slow;
8867      STATIC_ASSERT(kSmiTag == 0);  // adjust code below
8868      __ tst(smi_test_reg, Operand(kSmiTagMask));
8869      __ b(ne, &slow);
8870      Register scratch2 = smi_test_reg;
8871      smi_test_reg = no_reg;
8872      switch (op_) {
8873        case Token::BIT_OR:  __ orr(result, rhs, Operand(lhs)); break;
8874        case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
8875        case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
8876        case Token::SAR:
8877          // Remove tags from right operand.
8878          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
8879          __ mov(result, Operand(lhs, ASR, scratch2));
8880          // Smi tag result.
8881          __ bic(result, result, Operand(kSmiTagMask));
8882          break;
8883        case Token::SHR:
8884          // Remove tags from operands.  We can't do this on a 31 bit number
8885          // because then the 0s get shifted into bit 30 instead of bit 31.
8886          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
8887          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
8888          __ mov(scratch, Operand(scratch, LSR, scratch2));
8889          // Unsigned shift is not allowed to produce a negative number, so
8890          // check the sign bit and the sign bit after Smi tagging.
8891          __ tst(scratch, Operand(0xc0000000));
8892          __ b(ne, &slow);
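          // (0xc0000000 covers bit 31, the sign of the untagged result,
          // and bit 30, which becomes the sign bit after smi tagging.)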
8893          // Smi tag result.
8894          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
8895          break;
8896        case Token::SHL:
8897          // Remove tags from operands.
8898          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
8899          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
8900          __ mov(scratch, Operand(scratch, LSL, scratch2));
8901          // Check that the signed result fits in a Smi.
8902          __ add(scratch2, scratch, Operand(0x40000000), SetCC);
8903          __ b(mi, &slow);
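          // (The add above shifts the valid smi range [-2^30, 2^30 - 1] to
          // the non-negative integers, so any result that cannot be tagged
          // comes out negative and takes the mi branch.)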
8904          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
8905          break;
8906        default: UNREACHABLE();
8907      }
8908      __ Ret();
8909      __ bind(&slow);
8910      HandleNonSmiBitwiseOp(masm, lhs, rhs);
8911      break;
8912    }
8913
8914    default: UNREACHABLE();
8915  }
8916  // This code should be unreachable.
8917  __ stop("Unreachable");
8918
8919  // Generate an unreachable reference to the DEFAULT stub so that it can be
8920  // found at the end of this stub when clearing ICs at GC.
8921  // TODO(kaznacheev): Check performance impact and get rid of this.
8922  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
8923    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
8924    __ CallStub(&uninit);
8925  }
8926}
8927
8928
8929void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
8930  Label get_result;
8931
8932  __ Push(r1, r0);
8933
8934  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
8935  __ mov(r1, Operand(Smi::FromInt(op_)));
8936  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
8937  __ Push(r2, r1, r0);
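  // The stack now holds the five arguments for the IC patch routine: the
  // two operands, the stub's MinorKey, the operation token, and the current
  // runtime operand type.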
8938
8939  __ TailCallExternalReference(
8940      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
8941      5,
8942      1);
8943}
8944
8945
8946Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
8947  GenericBinaryOpStub stub(key, type_info);
8948  return stub.GetCode();
8949}
8950
8951
8952void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
8953  // Argument is a number and is on stack and in r0.
8954  Label runtime_call;
8955  Label input_not_smi;
8956  Label loaded;
8957
8958  if (CpuFeatures::IsSupported(VFP3)) {
8959    // Load argument and check if it is a smi.
8960    __ BranchOnNotSmi(r0, &input_not_smi);
8961
8962    CpuFeatures::Scope scope(VFP3);
8963    // Input is a smi. Convert to double and load the low and high words
8964    // of the double into r2, r3.
8965    __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
8966    __ b(&loaded);
8967
8968    __ bind(&input_not_smi);
8969    // Check if input is a HeapNumber.
8970    __ CheckMap(r0,
8971                r1,
8972                Heap::kHeapNumberMapRootIndex,
8973                &runtime_call,
8974                true);
8975    // Input is a HeapNumber. Load it to a double register and store the
8976    // low and high words into r2, r3.
8977    __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
8978
8979    __ bind(&loaded);
8980    // r2 = low 32 bits of double value
8981    // r3 = high 32 bits of double value
8982    // Compute hash (the shifts are arithmetic):
8983    //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
8984    __ eor(r1, r2, Operand(r3));
8985    __ eor(r1, r1, Operand(r1, ASR, 16));
8986    __ eor(r1, r1, Operand(r1, ASR, 8));
8987    ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
8988    __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
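    // Worked example, for illustration: for the input 1.0 the words are
    // low = 0x00000000 and high = 0x3ff00000, so h starts as 0x3ff00000,
    // becomes 0x3ff03ff0 after folding in h >> 16, and 0x3fcfcfcf after
    // folding in h >> 8, before being masked down to the cache size.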
8989
8990    // r2 = low 32 bits of double value.
8991    // r3 = high 32 bits of double value.
8992    // r1 = TranscendentalCache::hash(double value).
8993    __ mov(r0,
8994           Operand(ExternalReference::transcendental_cache_array_address()));
8995    // r0 points to cache array.
8996    __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
8997    // r0 points to the cache for the type type_.
8998    // If NULL, the cache hasn't been initialized yet, so go through runtime.
8999    __ cmp(r0, Operand(0));
9000    __ b(eq, &runtime_call);
9001
9002#ifdef DEBUG
9003    // Check that the layout of cache elements matches expectations.
9004    { TranscendentalCache::Element test_elem[2];
9005      char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
9006      char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
9007      char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
9008      char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
9009      char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
9010      CHECK_EQ(12, elem2_start - elem_start);  // Two uint_32's and a pointer.
9011      CHECK_EQ(0, elem_in0 - elem_start);
9012      CHECK_EQ(kIntSize, elem_in1 - elem_start);
9013      CHECK_EQ(2 * kIntSize, elem_out - elem_start);
9014    }
9015#endif
9016
9017    // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
9018    __ add(r1, r1, Operand(r1, LSL, 1));
9019    __ add(r0, r0, Operand(r1, LSL, 2));
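    // (r1 + (r1 << 1) is r1 * 3; scaling by 4 then yields the byte offset
    // r1 * 12, matching the 12-byte element layout verified above.)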
9020    // Check if cache matches: Double value is stored in uint32_t[2] array.
9021    __ ldm(ia, r0, r4.bit() | r5.bit() | r6.bit());
9022    __ cmp(r2, r4);
9023    __ b(ne, &runtime_call);
9024    __ cmp(r3, r5);
9025    __ b(ne, &runtime_call);
9026    // Cache hit. Load result, pop argument and return.
9027    __ mov(r0, Operand(r6));
9028    __ pop();
9029    __ Ret();
9030  }
9031
9032  __ bind(&runtime_call);
9033  __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
9034}
9035
9036
9037Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
9038  switch (type_) {
9039    // Add more cases when necessary.
9040    case TranscendentalCache::SIN: return Runtime::kMath_sin;
9041    case TranscendentalCache::COS: return Runtime::kMath_cos;
9042    default:
9043      UNIMPLEMENTED();
9044      return Runtime::kAbort;
9045  }
9046}
9047
9048
9049void StackCheckStub::Generate(MacroAssembler* masm) {
9050  // Do tail-call to runtime routine.  Runtime routines expect at least one
9051  // argument, so give it a Smi.
9052  __ mov(r0, Operand(Smi::FromInt(0)));
9053  __ push(r0);
9054  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
9055
9056  __ StubReturn(1);
9057}
9058
9059
9060void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
9061  Label slow, done;
9062
9063  Register heap_number_map = r6;
9064  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
9065
9066  if (op_ == Token::SUB) {
9067    // Check whether the value is a smi.
9068    Label try_float;
9069    __ tst(r0, Operand(kSmiTagMask));
9070    __ b(ne, &try_float);
9071
9072    // Go slow case if the value of the expression is zero
9073    // to make sure that we switch between 0 and -0.
9074    if (negative_zero_ == kStrictNegativeZero) {
9075      // If we have to check for zero, then we can check for the max negative
9076      // smi while we are at it.
9077      __ bic(ip, r0, Operand(0x80000000), SetCC);
9078      __ b(eq, &slow);
9079      __ rsb(r0, r0, Operand(0));
9080      __ StubReturn(1);
9081    } else {
9082      // The value of the expression is a smi and 0 is OK for -0.  Try
9083      // optimistic subtraction '0 - value'.
9084      __ rsb(r0, r0, Operand(0), SetCC);
9085      __ StubReturn(1, vc);
9086      // We don't have to reverse the optimistic neg since the only case
9087      // where we fall through is the minimum negative Smi, which is the case
9088      // where the neg leaves the register unchanged.
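      // (Example: for the minimum tagged smi 0x80000000, 0 - 0x80000000
      // overflows back to 0x80000000 with V set, so the return above is
      // skipped and we fall through to the jump below.)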
9089      __ jmp(&slow);  // Go slow on max negative Smi.
9090    }
9091
9092    __ bind(&try_float);
9093    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
9094    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
9095    __ cmp(r1, heap_number_map);
9096    __ b(ne, &slow);
9097    // r0 is a heap number.  Get a new heap number in r1.
9098    if (overwrite_ == UNARY_OVERWRITE) {
9099      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
9100      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
9101      __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
9102    } else {
9103      __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
9104      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
9105      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
9106      __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
9107      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
9108      __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
9109      __ mov(r0, Operand(r1));
9110    }
9111  } else if (op_ == Token::BIT_NOT) {
9112    // Check if the operand is a heap number.
9113    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
9114    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
9115    __ cmp(r1, heap_number_map);
9116    __ b(ne, &slow);
9117
9118    // Convert the heap number in r0 to an untagged integer in r1.
9119    GetInt32(masm, r0, r1, r2, r3, &slow);
9120
9121    // Do the bitwise operation (move negated) and check if the result
9122    // fits in a smi.
9123    Label try_float;
9124    __ mvn(r1, Operand(r1));
9125    __ add(r2, r1, Operand(0x40000000), SetCC);
9126    __ b(mi, &try_float);
9127    __ mov(r0, Operand(r1, LSL, kSmiTagSize));
9128    __ b(&done);
9129
9130    __ bind(&try_float);
9131    if (overwrite_ != UNARY_OVERWRITE) {
9132      // Allocate a fresh heap number, but don't overwrite r0 until
9133      // we're sure we can do it without going through the slow case
9134      // that needs the value in r0.
9135      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
9136      __ mov(r0, Operand(r2));
9137    }
9138
9139    if (CpuFeatures::IsSupported(VFP3)) {
9140      // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
9141      CpuFeatures::Scope scope(VFP3);
9142      __ vmov(s0, r1);
9143      __ vcvt_f64_s32(d0, s0);
9144      __ sub(r2, r0, Operand(kHeapObjectTag));
9145      __ vstr(d0, r2, HeapNumber::kValueOffset);
9146    } else {
9147      // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
9148      // have to set up a frame.
9149      WriteInt32ToHeapNumberStub stub(r1, r0, r2);
9150      __ push(lr);
9151      __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
9152      __ pop(lr);
9153    }
9154  } else {
9155    UNIMPLEMENTED();
9156  }
9157
9158  __ bind(&done);
9159  __ StubReturn(1);
9160
9161  // Handle the slow case by jumping to the JavaScript builtin.
9162  __ bind(&slow);
9163  __ push(r0);
9164  switch (op_) {
9165    case Token::SUB:
9166      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
9167      break;
9168    case Token::BIT_NOT:
9169      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
9170      break;
9171    default:
9172      UNREACHABLE();
9173  }
9174}
9175
9176
9177void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
9178  // r0 holds the exception.
9179
9180  // Adjust this code if not the case.
9181  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
9182
9183  // Drop the sp to the top of the handler.
9184  __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
9185  __ ldr(sp, MemOperand(r3));
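  // Stack layout at this point (cf. the STATIC_ASSERTs here and the layout
  // comment in GenerateThrowUncatchable below):
  // sp ->   next handler
  //         state
  //         fp
  //         lr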
9186
9187  // Restore the next handler and frame pointer, discard handler state.
9188  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
9189  __ pop(r2);
9190  __ str(r2, MemOperand(r3));
9191  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
9192  __ ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state.
9193
9194  // Before returning we restore the context from the frame pointer if
9195  // not NULL.  The frame pointer is NULL in the exception handler of a
9196  // JS entry frame.
9197  __ cmp(fp, Operand(0));
9198  // Set cp to NULL if fp is NULL.
9199  __ mov(cp, Operand(0), LeaveCC, eq);
9200  // Restore cp otherwise.
9201  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
9202#ifdef DEBUG
9203  if (FLAG_debug_code) {
9204    __ mov(lr, Operand(pc));
9205  }
9206#endif
9207  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
9208  __ pop(pc);
9209}
9210
9211
9212void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
9213                                          UncatchableExceptionType type) {
9214  // Adjust this code if not the case.
9215  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
9216
9217  // Drop sp to the top stack handler.
9218  __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
9219  __ ldr(sp, MemOperand(r3));
9220
9221  // Unwind the handlers until the ENTRY handler is found.
9222  Label loop, done;
9223  __ bind(&loop);
9224  // Load the type of the current stack handler.
9225  const int kStateOffset = StackHandlerConstants::kStateOffset;
9226  __ ldr(r2, MemOperand(sp, kStateOffset));
9227  __ cmp(r2, Operand(StackHandler::ENTRY));
9228  __ b(eq, &done);
9229  // Fetch the next handler in the list.
9230  const int kNextOffset = StackHandlerConstants::kNextOffset;
9231  __ ldr(sp, MemOperand(sp, kNextOffset));
9232  __ jmp(&loop);
9233  __ bind(&done);
9234
9235  // Set the top handler address to the next handler past the current ENTRY handler.
9236  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
9237  __ pop(r2);
9238  __ str(r2, MemOperand(r3));
9239
9240  if (type == OUT_OF_MEMORY) {
9241    // Set external caught exception to false.
9242    ExternalReference external_caught(Top::k_external_caught_exception_address);
9243    __ mov(r0, Operand(false));
9244    __ mov(r2, Operand(external_caught));
9245    __ str(r0, MemOperand(r2));
9246
9247    // Set pending exception and r0 to out of memory exception.
9248    Failure* out_of_memory = Failure::OutOfMemoryException();
9249    __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
9250    __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
9251    __ str(r0, MemOperand(r2));
9252  }
9253
9254  // Stack layout at this point. See also StackHandlerConstants.
9255  // sp ->   state (ENTRY)
9256  //         fp
9257  //         lr
9258
9259  // Discard handler state (r2 is not used) and restore frame pointer.
9260  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
9261  __ ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
9262  // Before returning we restore the context from the frame pointer if
9263  // not NULL.  The frame pointer is NULL in the exception handler of a
9264  // JS entry frame.
9265  __ cmp(fp, Operand(0));
9266  // Set cp to NULL if fp is NULL.
9267  __ mov(cp, Operand(0), LeaveCC, eq);
9268  // Restore cp otherwise.
9269  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
9270#ifdef DEBUG
9271  if (FLAG_debug_code) {
9272    __ mov(lr, Operand(pc));
9273  }
9274#endif
9275  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
9276  __ pop(pc);
9277}
9278
9279
9280void CEntryStub::GenerateCore(MacroAssembler* masm,
9281                              Label* throw_normal_exception,
9282                              Label* throw_termination_exception,
9283                              Label* throw_out_of_memory_exception,
9284                              bool do_gc,
9285                              bool always_allocate,
9286                              int frame_alignment_skew) {
9287  // r0: result parameter for PerformGC, if any
9288  // r4: number of arguments including receiver  (C callee-saved)
9289  // r5: pointer to builtin function  (C callee-saved)
9290  // r6: pointer to the first argument (C callee-saved)
9291
9292  if (do_gc) {
9293    // Passing r0.
9294    __ PrepareCallCFunction(1, r1);
9295    __ CallCFunction(ExternalReference::perform_gc_function(), 1);
9296  }
9297
9298  ExternalReference scope_depth =
9299      ExternalReference::heap_always_allocate_scope_depth();
9300  if (always_allocate) {
9301    __ mov(r0, Operand(scope_depth));
9302    __ ldr(r1, MemOperand(r0));
9303    __ add(r1, r1, Operand(1));
9304    __ str(r1, MemOperand(r0));
9305  }
9306
9307  // Call C built-in.
9308  // r0 = argc, r1 = argv
9309  __ mov(r0, Operand(r4));
9310  __ mov(r1, Operand(r6));
9311
9312  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
9313  int frame_alignment_mask = frame_alignment - 1;
9314#if defined(V8_HOST_ARCH_ARM)
9315  if (FLAG_debug_code) {
9316    if (frame_alignment > kPointerSize) {
9317      Label alignment_as_expected;
9318      ASSERT(IsPowerOf2(frame_alignment));
9319      __ sub(r2, sp, Operand(frame_alignment_skew));
9320      __ tst(r2, Operand(frame_alignment_mask));
9321      __ b(eq, &alignment_as_expected);
9322      // Don't use Check here, as it will call Runtime_Abort re-entering here.
9323      __ stop("Unexpected alignment");
9324      __ bind(&alignment_as_expected);
9325    }
9326  }
9327#endif
9328
9329  // Just before the call (jump) below, lr is pushed, so the actual
9330  // alignment adds one word to the current skew.
9331  int alignment_before_call =
9332      (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
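  // For example, with 8-byte activation frame alignment and a zero skew,
  // alignment_before_call is 4, so one filler word is pushed below and the
  // push of lr then makes the stack 8-byte aligned at the call.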
9333  if (alignment_before_call > 0) {
9334    // Push until the alignment before the call is met.
9335    __ mov(r2, Operand(0));
9336    for (int i = alignment_before_call;
9337        (i & frame_alignment_mask) != 0;
9338        i += kPointerSize) {
9339      __ push(r2);
9340    }
9341  }
9342
9343  // TODO(1242173): To let the GC traverse the return address of the exit
9344  // frames, we need to know where the return address is. Right now,
9345  // we push it on the stack to be able to find it again, but we never
9346  // restore from it in case of changes, which makes it impossible to
9347  // support moving the C entry code stub. This should be fixed, but currently
9348  // this is OK because the CEntryStub gets generated so early in the V8 boot
9349  // sequence that it is never moved.
9350  masm->add(lr, pc, Operand(4));  // Compute return address: (pc + 8) + 4
9351  masm->push(lr);
9352  masm->Jump(r5);
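  // The add above computes the address of this point (pc reads as the
  // address of the current instruction plus 8), so lr returns here after
  // the C function finishes.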
9353
9354  // Restore sp back to before aligning the stack.
9355  if (alignment_before_call > 0) {
9356    __ add(sp, sp, Operand(alignment_before_call));
9357  }
9358
9359  if (always_allocate) {
9360    // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
9361    // though (contain the result).
9362    __ mov(r2, Operand(scope_depth));
9363    __ ldr(r3, MemOperand(r2));
9364    __ sub(r3, r3, Operand(1));
9365    __ str(r3, MemOperand(r2));
9366  }
9367
9368  // check for failure result
9369  Label failure_returned;
9370  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
9371  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
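  // (The failure tag sets both low bits, so adding one carries out of them
  // exactly when r0 is failure-tagged — see the STATIC_ASSERT above — while
  // smi and heap object tags leave the low bits of r2 nonzero.)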
9372  __ add(r2, r0, Operand(1));
9373  __ tst(r2, Operand(kFailureTagMask));
9374  __ b(eq, &failure_returned);
9375
9376  // Exit C frame and return.
9377  // r0:r1: result
9378  // sp: stack pointer
9379  // fp: frame pointer
9380  __ LeaveExitFrame(mode_);
9381
9382  // check if we should retry or throw exception
9383  Label retry;
9384  __ bind(&failure_returned);
9385  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
9386  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
9387  __ b(eq, &retry);
9388
9389  // Special handling of out of memory exceptions.
9390  Failure* out_of_memory = Failure::OutOfMemoryException();
9391  __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
9392  __ b(eq, throw_out_of_memory_exception);
9393
9394  // Retrieve the pending exception and clear the variable.
9395  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
9396  __ ldr(r3, MemOperand(ip));
9397  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
9398  __ ldr(r0, MemOperand(ip));
9399  __ str(r3, MemOperand(ip));
9400
9401  // Special handling of termination exceptions which are uncatchable
9402  // by javascript code.
9403  __ cmp(r0, Operand(Factory::termination_exception()));
9404  __ b(eq, throw_termination_exception);
9405
9406  // Handle normal exception.
9407  __ jmp(throw_normal_exception);
9408
9409  __ bind(&retry);  // pass last failure (r0) as parameter (r0) when retrying
9410}
9411
9412
9413void CEntryStub::Generate(MacroAssembler* masm) {
9414  // Called from JavaScript; parameters are on stack as if calling JS function
9415  // r0: number of arguments including receiver
9416  // r1: pointer to builtin function
9417  // fp: frame pointer  (restored after C call)
9418  // sp: stack pointer  (restored as callee's sp after C call)
9419  // cp: current context  (C callee-saved)
9420
9421  // Result returned in r0 or r0+r1 by default.
9422
9423  // NOTE: Invocations of builtins may return failure objects
9424  // instead of a proper result. The builtin entry handles
9425  // this by performing a garbage collection and retrying the
9426  // builtin once.
9427
9428  // Enter the exit frame that transitions from JavaScript to C++.
9429  __ EnterExitFrame(mode_);
9430
9431  // r4: number of arguments (C callee-saved)
9432  // r5: pointer to builtin function (C callee-saved)
9433  // r6: pointer to first argument (C callee-saved)
9434
9435  Label throw_normal_exception;
9436  Label throw_termination_exception;
9437  Label throw_out_of_memory_exception;
9438
9439  // Call into the runtime system.
9440  GenerateCore(masm,
9441               &throw_normal_exception,
9442               &throw_termination_exception,
9443               &throw_out_of_memory_exception,
9444               false,
9445               false,
9446               -kPointerSize);
9447
9448  // Do space-specific GC and retry runtime call.
9449  GenerateCore(masm,
9450               &throw_normal_exception,
9451               &throw_termination_exception,
9452               &throw_out_of_memory_exception,
9453               true,
9454               false,
9455               0);
9456
9457  // Do full GC and retry runtime call one final time.
9458  Failure* failure = Failure::InternalError();
9459  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
9460  GenerateCore(masm,
9461               &throw_normal_exception,
9462               &throw_termination_exception,
9463               &throw_out_of_memory_exception,
9464               true,
9465               true,
9466               kPointerSize);
9467
9468  __ bind(&throw_out_of_memory_exception);
9469  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
9470
9471  __ bind(&throw_termination_exception);
9472  GenerateThrowUncatchable(masm, TERMINATION);
9473
9474  __ bind(&throw_normal_exception);
9475  GenerateThrowTOS(masm);
9476}
9477
9478
9479void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
9480  // r0: code entry
9481  // r1: function
9482  // r2: receiver
9483  // r3: argc
9484  // [sp+0]: argv
9485
9486  Label invoke, exit;
9487
9488  // Called from C, so do not pop argc and args on exit (preserve sp)
9489  // No need to save register-passed args
9490  // Save callee-saved registers (incl. cp and fp), sp, and lr
9491  __ stm(db_w, sp, kCalleeSaved | lr.bit());
9492
9493  // Get address of argv, see stm above.
9494  // r0: code entry
9495  // r1: function
9496  // r2: receiver
9497  // r3: argc
9498  __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize));  // argv
9499
9500  // Push a frame with special values setup to mark it as an entry frame.
9501  // r0: code entry
9502  // r1: function
9503  // r2: receiver
9504  // r3: argc
9505  // r4: argv
9506  __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used.
9507  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
9508  __ mov(r7, Operand(Smi::FromInt(marker)));
9509  __ mov(r6, Operand(Smi::FromInt(marker)));
9510  __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
9511  __ ldr(r5, MemOperand(r5));
9512  __ Push(r8, r7, r6, r5);
9513
9514  // Setup frame pointer for the frame to be pushed.
9515  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
9516
9517  // Call a faked try-block that does the invoke.
9518  __ bl(&invoke);
9519
9520  // Caught exception: Store result (exception) in the pending
9521  // exception field in the JSEnv and return a failure sentinel.
9522  // Coming in here the fp will be invalid because the PushTryHandler below
9523  // sets it to 0 to signal the existence of the JSEntry frame.
9524  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
9525  __ str(r0, MemOperand(ip));
9526  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
9527  __ b(&exit);
9528
9529  // Invoke: Link this frame into the handler chain.
9530  __ bind(&invoke);
9531  // Must preserve r0-r4, r5-r7 are available.
9532  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
9533  // If an exception not caught by another handler occurs, this handler
9534  // returns control to the code after the bl(&invoke) above, which
9535  // restores all kCalleeSaved registers (including cp and fp) to their
9536  // saved values before returning a failure to C.
9537
9538  // Clear any pending exceptions.
9539  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
9540  __ ldr(r5, MemOperand(ip));
9541  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
9542  __ str(r5, MemOperand(ip));
9543
9544  // Invoke the function by calling through JS entry trampoline builtin.
9545  // Notice that we cannot store a reference to the trampoline code directly in
9546  // this stub, because runtime stubs are not traversed when doing GC.
9547
9548  // Expected registers by Builtins::JSEntryTrampoline
9549  // r0: code entry
9550  // r1: function
9551  // r2: receiver
9552  // r3: argc
9553  // r4: argv
9554  if (is_construct) {
9555    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
9556    __ mov(ip, Operand(construct_entry));
9557  } else {
9558    ExternalReference entry(Builtins::JSEntryTrampoline);
9559    __ mov(ip, Operand(entry));
9560  }
9561  __ ldr(ip, MemOperand(ip));  // deref address
9562
9563  // Branch and link to JSEntryTrampoline.  We don't use the double underscore
9564  // macro for the add instruction because we don't want the coverage tool
9565  // inserting instructions here after we read the pc.
9566  __ mov(lr, Operand(pc));
9567  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
9568
9569  // Unlink this frame from the handler chain. When reading the
9570  // address of the next handler, there is no need to use the address
9571  // displacement since the current stack pointer (sp) points directly
9572  // to the stack handler.
9573  __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
9574  __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
9575  __ str(r3, MemOperand(ip));
9576  // No need to restore registers
9577  __ add(sp, sp, Operand(StackHandlerConstants::kSize));
9578
9579
9580  __ bind(&exit);  // r0 holds result
9581  // Restore the top frame descriptors from the stack.
9582  __ pop(r3);
9583  __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
9584  __ str(r3, MemOperand(ip));
9585
9586  // Reset the stack to the callee saved registers.
9587  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
9588
9589  // Restore callee-saved registers and return.
9590#ifdef DEBUG
9591  if (FLAG_debug_code) {
9592    __ mov(lr, Operand(pc));
9593  }
9594#endif
9595  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
9596}
9597
9598
9599// This stub performs an instanceof, calling the builtin function if
9600// necessary.  Uses r1 for the object, r0 for the function that it may
9601// be an instance of (these are fetched from the stack).
9602void InstanceofStub::Generate(MacroAssembler* masm) {
9603  // Get the object - slow case for smis (we may need to throw an exception
9604  // depending on the rhs).
9605  Label slow, loop, is_instance, is_not_instance;
9606  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
9607  __ BranchOnSmi(r0, &slow);
9608
9609  // Check that the left hand is a JS object and put map in r3.
9610  __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
9611  __ b(lt, &slow);
9612  __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
9613  __ b(gt, &slow);
9614
9615  // Get the prototype of the function (r4 is result, r2 is scratch).
9616  __ ldr(r1, MemOperand(sp, 0));
9617  // r1 is function, r3 is map.
9618
9619  // Look up the function and the map in the instanceof cache.
9620  Label miss;
9621  __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
9622  __ cmp(r1, ip);
9623  __ b(ne, &miss);
9624  __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
9625  __ cmp(r3, ip);
9626  __ b(ne, &miss);
9627  __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
9628  __ pop();
9629  __ pop();
9630  __ mov(pc, Operand(lr));
9631
9632  __ bind(&miss);
9633  __ TryGetFunctionPrototype(r1, r4, r2, &slow);
9634
9635  // Check that the function prototype is a JS object.
9636  __ BranchOnSmi(r4, &slow);
9637  __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
9638  __ b(lt, &slow);
9639  __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
9640  __ b(gt, &slow);
9641
9642  __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex);
9643  __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex);
9644
9645  // Register mapping: r3 is object map and r4 is function prototype.
9646  // Get prototype of object into r2.
9647  __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
9648
9649  // Loop through the prototype chain looking for the function prototype.
9650  __ bind(&loop);
9651  __ cmp(r2, Operand(r4));
9652  __ b(eq, &is_instance);
9653  __ LoadRoot(ip, Heap::kNullValueRootIndex);
9654  __ cmp(r2, ip);
9655  __ b(eq, &is_not_instance);
9656  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
9657  __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
9658  __ jmp(&loop);
9659
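  // Note: the stub returns Smi 0 in r0 for "is an instance" and Smi 1 for
  // "is not an instance", and caches that answer below.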
9660  __ bind(&is_instance);
9661  __ mov(r0, Operand(Smi::FromInt(0)));
9662  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
9663  __ pop();
9664  __ pop();
9665  __ mov(pc, Operand(lr));  // Return.
9666
9667  __ bind(&is_not_instance);
9668  __ mov(r0, Operand(Smi::FromInt(1)));
9669  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
9670  __ pop();
9671  __ pop();
9672  __ mov(pc, Operand(lr));  // Return.
9673
9674  // Slow-case.  Tail call builtin.
9675  __ bind(&slow);
9676  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
9677}
9678
9679
9680void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
9681  // The displacement is the offset of the last parameter (if any)
9682  // relative to the frame pointer.
9683  static const int kDisplacement =
9684      StandardFrameConstants::kCallerSPOffset - kPointerSize;
9685
9686  // Check that the key is a smi.
9687  Label slow;
9688  __ BranchOnNotSmi(r1, &slow);
9689
9690  // Check if the calling frame is an arguments adaptor frame.
9691  Label adaptor;
9692  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
9693  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
9694  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
9695  __ b(eq, &adaptor);
9696
9697  // Check index against formal parameters count limit passed in
9698  // through register r0. Use unsigned comparison to get negative
9699  // check for free.
9700  __ cmp(r1, r0);
9701  __ b(cs, &slow);
9702
9703  // Read the argument from the stack and return it.
9704  __ sub(r3, r0, r1);
9705  __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
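  // (r0 and r1 are smis, so r3 holds 2 * (argc - index); the shift by
  // kPointerSizeLog2 - kSmiTagSize = 1 scales that to a byte offset.)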
9706  __ ldr(r0, MemOperand(r3, kDisplacement));
9707  __ Jump(lr);
9708
9709  // Arguments adaptor case: Check index against actual arguments
9710  // limit found in the arguments adaptor frame. Use unsigned
9711  // comparison to get negative check for free.
9712  __ bind(&adaptor);
9713  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
9714  __ cmp(r1, r0);
9715  __ b(cs, &slow);
9716
9717  // Read the argument from the adaptor frame and return it.
9718  __ sub(r3, r0, r1);
9719  __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
9720  __ ldr(r0, MemOperand(r3, kDisplacement));
9721  __ Jump(lr);
9722
9723  // Slow-case: Handle non-smi or out-of-bounds access to arguments
9724  // by calling the runtime system.
9725  __ bind(&slow);
9726  __ push(r1);
9727  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
9728}
9729
9730
9731void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
9732  // sp[0] : number of parameters
9733  // sp[4] : receiver displacement
9734  // sp[8] : function
9735
9736  // Check if the calling frame is an arguments adaptor frame.
9737  Label adaptor_frame, try_allocate, runtime;
9738  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
9739  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
9740  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
9741  __ b(eq, &adaptor_frame);
9742
9743  // Get the length from the frame.
9744  __ ldr(r1, MemOperand(sp, 0));
9745  __ b(&try_allocate);
9746
9747  // Patch the arguments.length and the parameters pointer.
9748  __ bind(&adaptor_frame);
9749  __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
9750  __ str(r1, MemOperand(sp, 0));
9751  __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
9752  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
9753  __ str(r3, MemOperand(sp, 1 * kPointerSize));
9754
9755  // Try the new space allocation. Start out with computing the size
9756  // of the arguments object and the elements array in words.
9757  Label add_arguments_object;
9758  __ bind(&try_allocate);
9759  __ cmp(r1, Operand(0));
9760  __ b(eq, &add_arguments_object);
9761  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
9762  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
9763  __ bind(&add_arguments_object);
9764  __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
9765
9766  // Do the allocation of both objects in one go.
9767  __ AllocateInNewSpace(
9768      r1,
9769      r0,
9770      r2,
9771      r3,
9772      &runtime,
9773      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
9774
9775  // Get the arguments boilerplate from the current (global) context.
9776  int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
9777  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
9778  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
9779  __ ldr(r4, MemOperand(r4, offset));
9780
9781  // Copy the JS object part.
9782  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
9783    __ ldr(r3, FieldMemOperand(r4, i));
9784    __ str(r3, FieldMemOperand(r0, i));
9785  }
9786
9787  // Setup the callee in-object property.
9788  STATIC_ASSERT(Heap::arguments_callee_index == 0);
9789  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
9790  __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
9791
9792  // Get the length (smi tagged) and set that as an in-object property too.
9793  STATIC_ASSERT(Heap::arguments_length_index == 1);
9794  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
9795  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
9796
9797  // If there are no actual arguments, we're done.
9798  Label done;
9799  __ cmp(r1, Operand(0));
9800  __ b(eq, &done);
9801
9802  // Get the parameters pointer from the stack.
9803  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
9804
9805  // Setup the elements pointer in the allocated arguments object and
9806  // initialize the header in the elements fixed array.
9807  __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
9808  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
9809  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
9810  __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
9811  __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
9812  __ mov(r1, Operand(r1, LSR, kSmiTagSize));  // Untag the length for the loop.
9813
9814  // Copy the fixed array slots.
9815  Label loop;
9816  // Setup r4 to point to the first array slot.
9817  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
9818  __ bind(&loop);
9819  // Pre-decrement r2 with kPointerSize on each iteration.
9820  // Pre-decrement in order to skip receiver.
9821  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
9822  // Post-increment r4 with kPointerSize on each iteration.
9823  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
9824  __ sub(r1, r1, Operand(1));
9825  __ cmp(r1, Operand(0));
9826  __ b(ne, &loop);
9827
9828  // Return and remove the on-stack parameters.
9829  __ bind(&done);
9830  __ add(sp, sp, Operand(3 * kPointerSize));
9831  __ Ret();
9832
9833  // Do the runtime call to allocate the arguments object.
9834  __ bind(&runtime);
9835  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
9836}
9837
9838
9839void RegExpExecStub::Generate(MacroAssembler* masm) {
9840  // Just jump directly to the runtime if native RegExp is not selected at
9841  // compile time, or if entry to the native RegExp code from generated
9842  // code is turned off by the runtime flag.
9843#ifdef V8_INTERPRETED_REGEXP
9844  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
9845#else  // V8_INTERPRETED_REGEXP
9846  if (!FLAG_regexp_entry_native) {
9847    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
9848    return;
9849  }
9850
9851  // Stack frame on entry.
9852  //  sp[0]: last_match_info (expected JSArray)
9853  //  sp[4]: previous index
9854  //  sp[8]: subject string
9855  //  sp[12]: JSRegExp object
9856
9857  static const int kLastMatchInfoOffset = 0 * kPointerSize;
9858  static const int kPreviousIndexOffset = 1 * kPointerSize;
9859  static const int kSubjectOffset = 2 * kPointerSize;
9860  static const int kJSRegExpOffset = 3 * kPointerSize;
9861
9862  Label runtime, invoke_regexp;
9863
9864  // Allocation of registers for this function. These are in callee save
9865  // registers and will be preserved by the call to the native RegExp code, as
9866  // this code is called using the normal C calling convention. When calling
9867  // directly from generated code the native RegExp code will not do a GC and
9868  // therefore the contents of these registers are safe to use after the call.
9869  Register subject = r4;
9870  Register regexp_data = r5;
9871  Register last_match_info_elements = r6;
9872
9873  // Ensure that a RegExp stack is allocated.
9874  ExternalReference address_of_regexp_stack_memory_address =
9875      ExternalReference::address_of_regexp_stack_memory_address();
9876  ExternalReference address_of_regexp_stack_memory_size =
9877      ExternalReference::address_of_regexp_stack_memory_size();
9878  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
9879  __ ldr(r0, MemOperand(r0, 0));
9880  __ tst(r0, Operand(r0));
9881  __ b(eq, &runtime);
9882
9883  // Check that the first argument is a JSRegExp object.
9884  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
9885  STATIC_ASSERT(kSmiTag == 0);
9886  __ tst(r0, Operand(kSmiTagMask));
9887  __ b(eq, &runtime);
9888  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
9889  __ b(ne, &runtime);
9890
9891  // Check that the RegExp has been compiled (data contains a fixed array).
9892  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
9893  if (FLAG_debug_code) {
9894    __ tst(regexp_data, Operand(kSmiTagMask));
9895    __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
9896    __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
9897    __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
9898  }
9899
9900  // regexp_data: RegExp data (FixedArray)
9901  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
9902  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
9903  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
9904  __ b(ne, &runtime);
9905
9906  // regexp_data: RegExp data (FixedArray)
9907  // Check that the number of captures fit in the static offsets vector buffer.
9908  __ ldr(r2,
9909         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
9910  // Calculate number of capture registers (number_of_captures + 1) * 2. This
9911  // uses the assumption that smis are 2 * their untagged value.
9912  STATIC_ASSERT(kSmiTag == 0);
9913  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
9914  __ add(r2, r2, Operand(2));  // r2 was a smi.
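  // For example, a regexp with 3 captures arrives here as smi 6, and
  // 6 + 2 = 8 = (3 + 1) * 2 capture registers.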
9915  // Check that the static offsets vector buffer is large enough.
9916  __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
9917  __ b(hi, &runtime);
9918
9919  // r2: Number of capture registers
9920  // regexp_data: RegExp data (FixedArray)
9921  // Check that the second argument is a string.
9922  __ ldr(subject, MemOperand(sp, kSubjectOffset));
9923  __ tst(subject, Operand(kSmiTagMask));
9924  __ b(eq, &runtime);
9925  Condition is_string = masm->IsObjectStringType(subject, r0);
9926  __ b(NegateCondition(is_string), &runtime);
9927  // Get the length of the string to r3.
9928  __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
9929
9930  // r2: Number of capture registers
9931  // r3: Length of subject string as a smi
9932  // subject: Subject string
9933  // regexp_data: RegExp data (FixedArray)
9934  // Check that the third argument is a positive smi less than the subject
9935  // string length. A negative value will be greater (unsigned comparison).
9936  __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
9937  __ tst(r0, Operand(kSmiTagMask));
9938  __ b(ne, &runtime);
9939  __ cmp(r3, Operand(r0));
9940  __ b(ls, &runtime);
9941
9942  // r2: Number of capture registers
9943  // subject: Subject string
9944  // regexp_data: RegExp data (FixedArray)
9945  // Check that the fourth object is a JSArray object.
9946  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
9947  __ tst(r0, Operand(kSmiTagMask));
9948  __ b(eq, &runtime);
9949  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
9950  __ b(ne, &runtime);
9951  // Check that the JSArray is in fast case.
9952  __ ldr(last_match_info_elements,
9953         FieldMemOperand(r0, JSArray::kElementsOffset));
9954  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
9955  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
9956  __ cmp(r0, ip);
9957  __ b(ne, &runtime);
9958  // Check that the last match info has space for the capture registers and the
9959  // additional information.
9960  __ ldr(r0,
9961         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
9962  __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
9963  __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
9964  __ b(gt, &runtime);
9965
9966  // subject: Subject string
9967  // regexp_data: RegExp data (FixedArray)
9968  // Check the representation and encoding of the subject string.
9969  Label seq_string;
9970  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
9971  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
9972  // First check for flat string.
9973  __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
9974  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
9975  __ b(eq, &seq_string);
9976
9977  // subject: Subject string
9978  // regexp_data: RegExp data (FixedArray)
9979  // Check for flat cons string.
9980  // A flat cons string is a cons string where the second part is the empty
9981  // string. In that case the subject string is just the first part of the cons
9982  // string. Also in this case the first part of the cons string is known to be
9983  // a sequential string or an external string.
9984  STATIC_ASSERT(kExternalStringTag != 0);
9985  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
9986  __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
9987  __ b(ne, &runtime);
9988  __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
9989  __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
9990  __ cmp(r0, r1);
9991  __ b(ne, &runtime);
9992  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
9993  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
9994  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
9995  // Is first part a flat string?
9996  STATIC_ASSERT(kSeqStringTag == 0);
9997  __ tst(r0, Operand(kStringRepresentationMask));
9998  __ b(nz, &runtime);
9999
10000  __ bind(&seq_string);
10001  // subject: Subject string
10002  // regexp_data: RegExp data (FixedArray)
10003  // r0: Instance type of subject string
10004  STATIC_ASSERT(4 == kAsciiStringTag);
10005  STATIC_ASSERT(kTwoByteStringTag == 0);
10006  // Find the code object based on the assumptions above.
10007  __ and_(r0, r0, Operand(kStringEncodingMask));
10008  __ mov(r3, Operand(r0, ASR, 2), SetCC);
10009  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
10010  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
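  // (The SetCC on the mov above leaves ne for ASCII (r3 == 1) and eq for
  // two-byte (r3 == 0), selecting between the two conditional loads.)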
10011
10012  // Check that the irregexp code has been generated for the actual string
10013  // encoding. If it has, the field contains a code object; otherwise it
10014  // contains the hole.
10015  __ CompareObjectType(r7, r0, r0, CODE_TYPE);
10016  __ b(ne, &runtime);
10017
10018  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
10019  // r7: code
10020  // subject: Subject string
10021  // regexp_data: RegExp data (FixedArray)
10022  // Load used arguments before starting to push arguments for call to native
10023  // RegExp code to avoid handling changing stack height.
10024  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
10025  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
10026
10027  // r1: previous index
10028  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
10029  // r7: code
10030  // subject: Subject string
10031  // regexp_data: RegExp data (FixedArray)
10032  // All checks done. Now push arguments for native regexp code.
10033  __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
10034
10035  static const int kRegExpExecuteArguments = 7;
10036  __ push(lr);
10037  __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
10038
10039  // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
10040  __ mov(r0, Operand(1));
10041  __ str(r0, MemOperand(sp, 2 * kPointerSize));
10042
10043  // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
10044  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
10045  __ ldr(r0, MemOperand(r0, 0));
10046  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
10047  __ ldr(r2, MemOperand(r2, 0));
10048  __ add(r0, r0, Operand(r2));
10049  __ str(r0, MemOperand(sp, 1 * kPointerSize));
10050
10051  // Argument 5 (sp[0]): static offsets vector buffer.
10052  __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
10053  __ str(r0, MemOperand(sp, 0 * kPointerSize));
10054
10055  // For arguments 4 and 3, get the string length, calculate the start of
10056  // string data, and the shift of the index (0 for ASCII, 1 for two byte).
10057  __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
10058  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
10059  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
10060  __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
10061  __ eor(r3, r3, Operand(1));
10062  // Argument 4 (r3): End of string data
10063  // Argument 3 (r2): Start of string data
10064  __ add(r2, r9, Operand(r1, LSL, r3));
10065  __ add(r3, r9, Operand(r0, LSL, r3));
10066
10067  // Argument 2 (r1): Previous index.
10068  // Already there
10069
10070  // Argument 1 (r0): Subject string.
10071  __ mov(r0, subject);
10072
10073  // Locate the code entry and call it.
10074  __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
10075  __ CallCFunction(r7, kRegExpExecuteArguments);
10076  __ pop(lr);
10077
10078  // r0: result
10079  // subject: subject string (callee saved)
10080  // regexp_data: RegExp data (callee saved)
10081  // last_match_info_elements: Last match info elements (callee saved)
10082
10083  // Check the result.
10084  Label success;
10085  __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
10086  __ b(eq, &success);
10087  Label failure;
10088  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
10089  __ b(eq, &failure);
10090  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
10091  // If not exception, it can only be retry. Handle that in the runtime system.
10092  __ b(ne, &runtime);
10093  // Result must now be exception. If there is no pending exception already, a
10094  // stack overflow (on the backtrack stack) was detected in RegExp code, but
10095  // the exception has not been created yet. Handle that in the runtime system.
10096  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
10097  __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
10098  __ ldr(r0, MemOperand(r0, 0));
10099  __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
10100  __ ldr(r1, MemOperand(r1, 0));
10101  __ cmp(r0, r1);
10102  __ b(eq, &runtime);
10103  __ bind(&failure);
10104  // For failure and exception return null.
10105  __ mov(r0, Operand(Factory::null_value()));
10106  __ add(sp, sp, Operand(4 * kPointerSize));
10107  __ Ret();
10108
10109  // Process the result from the native regexp code.
10110  __ bind(&success);
10111  __ ldr(r1,
10112         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
10113  // Calculate number of capture registers (number_of_captures + 1) * 2.
10114  STATIC_ASSERT(kSmiTag == 0);
10115  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
10116  __ add(r1, r1, Operand(2));  // r1 was a smi.
10117
10118  // r1: number of capture registers
10119  // r4: subject string
10120  // Store the capture count.
10121  __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize));  // To smi.
10122  __ str(r2, FieldMemOperand(last_match_info_elements,
10123                             RegExpImpl::kLastCaptureCountOffset));
10124  // Store last subject and last input.
10125  __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
10126  __ str(subject,
10127         FieldMemOperand(last_match_info_elements,
10128                         RegExpImpl::kLastSubjectOffset));
10129  __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
10130  __ str(subject,
10131         FieldMemOperand(last_match_info_elements,
10132                         RegExpImpl::kLastInputOffset));
10133  __ mov(r3, last_match_info_elements);
10134  __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
10135
10136  // Get the static offsets vector filled by the native regexp code.
10137  ExternalReference address_of_static_offsets_vector =
10138      ExternalReference::address_of_static_offsets_vector();
10139  __ mov(r2, Operand(address_of_static_offsets_vector));
10140
10141  // r1: number of capture registers
10142  // r2: offsets vector
10143  Label next_capture, done;
10144  // Capture register counter starts from number of capture registers and
10145  // counts down until wrapping after zero.
10146  __ add(r0,
10147         last_match_info_elements,
10148         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
10149  __ bind(&next_capture);
10150  __ sub(r1, r1, Operand(1), SetCC);
10151  __ b(mi, &done);
10152  // Read the value from the static offsets vector buffer.
10153  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
10154  // Store the smi value in the last match info.
10155  __ mov(r3, Operand(r3, LSL, kSmiTagSize));
10156  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
10157  __ jmp(&next_capture);
10158  __ bind(&done);
10159
10160  // Return last match info.
10161  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
10162  __ add(sp, sp, Operand(4 * kPointerSize));
10163  __ Ret();
10164
10165  // Do the runtime call to execute the regexp.
10166  __ bind(&runtime);
10167  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
10168#endif  // V8_INTERPRETED_REGEXP
10169}
10170
10171
10172void CallFunctionStub::Generate(MacroAssembler* masm) {
10173  Label slow;
10174
10175  // If the receiver might be a value (string, number or boolean), check for
10176  // this and box it if it is.
10177  if (ReceiverMightBeValue()) {
10178    // Get the receiver from the stack.
10179    // function, receiver [, arguments]
10180    Label receiver_is_value, receiver_is_js_object;
10181    __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
10182
10183    // Check if receiver is a smi (which is a number value).
10184    __ BranchOnSmi(r1, &receiver_is_value);
10185
10186    // Check if the receiver is a valid JS object.
10187    __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
10188    __ b(ge, &receiver_is_js_object);
10189
10190    // Call the runtime to box the value.
10191    __ bind(&receiver_is_value);
10192    __ EnterInternalFrame();
10193    __ push(r1);
10194    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
10195    __ LeaveInternalFrame();
10196    __ str(r0, MemOperand(sp, argc_ * kPointerSize));
10197
10198    __ bind(&receiver_is_js_object);
10199  }
10200
10201  // Get the function to call from the stack.
10202  // function, receiver [, arguments]
10203  __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
10204
10205  // Check that the function is really a JavaScript function.
10206  // r1: pushed function (to be verified)
10207  __ BranchOnSmi(r1, &slow);
10208  // Get the map of the function object.
10209  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
10210  __ b(ne, &slow);
10211
10212  // Fast-case: Invoke the function now.
10213  // r1: pushed function
10214  ParameterCount actual(argc_);
10215  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
10216
10217  // Slow-case: Non-function called.
10218  __ bind(&slow);
10219  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
10220  // of the original receiver from the call site).
10221  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
10222  __ mov(r0, Operand(argc_));  // Setup the number of arguments.
10223  __ mov(r2, Operand(0));
10224  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
10225  __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
10226          RelocInfo::CODE_TARGET);
10227}
10228
10229
10230// Unfortunately you have to run without snapshots to see most of these
10231// names in the profile since most compare stubs end up in the snapshot.
10232const char* CompareStub::GetName() {
10233  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
10234         (lhs_.is(r1) && rhs_.is(r0)));
10235
10236  if (name_ != NULL) return name_;
10237  const int kMaxNameLength = 100;
10238  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
10239  if (name_ == NULL) return "OOM";
10240
10241  const char* cc_name;
10242  switch (cc_) {
10243    case lt: cc_name = "LT"; break;
10244    case gt: cc_name = "GT"; break;
10245    case le: cc_name = "LE"; break;
10246    case ge: cc_name = "GE"; break;
10247    case eq: cc_name = "EQ"; break;
10248    case ne: cc_name = "NE"; break;
10249    default: cc_name = "UnknownCondition"; break;
10250  }
10251
10252  const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
10253  const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
10254
10255  const char* strict_name = "";
10256  if (strict_ && (cc_ == eq || cc_ == ne)) {
10257    strict_name = "_STRICT";
10258  }
10259
10260  const char* never_nan_nan_name = "";
10261  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
10262    never_nan_nan_name = "_NO_NAN";
10263  }
10264
10265  const char* include_number_compare_name = "";
10266  if (!include_number_compare_) {
10267    include_number_compare_name = "_NO_NUMBER";
10268  }
10269
10270  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
10271               "CompareStub_%s%s%s%s%s%s",
10272               cc_name,
10273               lhs_name,
10274               rhs_name,
10275               strict_name,
10276               never_nan_nan_name,
10277               include_number_compare_name);
10278  return name_;
10279}
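
// For example, a strict equality stub comparing lhs in r0 with rhs in r1
// (with number comparison included) is named "CompareStub_EQ_r0_r1_STRICT".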
10280
10281
10282int CompareStub::MinorKey() {
  // Encode the five parameters in a unique 16 bit value. To avoid duplicate
  // stubs the never NaN NaN condition is only taken into account if the
  // condition is equals.
10286  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
10287  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
10288         (lhs_.is(r1) && rhs_.is(r0)));
10289  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
10290         | RegisterField::encode(lhs_.is(r0))
10291         | StrictField::encode(strict_)
10292         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
10293         | IncludeNumberCompareField::encode(include_number_compare_);
10294}
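
// The fields above are packed with V8's BitField helper. A minimal sketch of
// the scheme (illustrative only; the real shift and size of each field come
// from the BitField typedefs in the CompareStub declaration):
//
//   template <class T, int shift, int size>
//   struct BitFieldSketch {
//     static uint32_t encode(T value) {
//       return static_cast<uint32_t>(value) << shift;
//     }
//   };
//
// MinorKey is then simply the bitwise OR of each field value shifted into its
// own disjoint bit range.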
10295
10296
10297// StringCharCodeAtGenerator
10298
10299void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
10300  Label flat_string;
10301  Label ascii_string;
10302  Label got_char_code;
10303
10304  // If the receiver is a smi trigger the non-string case.
10305  __ BranchOnSmi(object_, receiver_not_string_);
10306
10307  // Fetch the instance type of the receiver into result register.
10308  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
10309  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
10310  // If the receiver is not a string trigger the non-string case.
10311  __ tst(result_, Operand(kIsNotStringMask));
10312  __ b(ne, receiver_not_string_);
10313
10314  // If the index is non-smi trigger the non-smi case.
10315  __ BranchOnNotSmi(index_, &index_not_smi_);
10316
10317  // Put smi-tagged index into scratch register.
10318  __ mov(scratch_, index_);
10319  __ bind(&got_smi_index_);
10320
10321  // Check for index out of range.
10322  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
10323  __ cmp(ip, Operand(scratch_));
10324  __ b(ls, index_out_of_range_);
10325
10326  // We need special handling for non-flat strings.
10327  STATIC_ASSERT(kSeqStringTag == 0);
10328  __ tst(result_, Operand(kStringRepresentationMask));
10329  __ b(eq, &flat_string);
10330
10331  // Handle non-flat strings.
10332  __ tst(result_, Operand(kIsConsStringMask));
10333  __ b(eq, &call_runtime_);
10334
10335  // ConsString.
10336  // Check whether the right hand side is the empty string (i.e. if
10337  // this is really a flat string in a cons string). If that is not
10338  // the case we would rather go to the runtime system now to flatten
10339  // the string.
10340  __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
10341  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
10342  __ cmp(result_, Operand(ip));
10343  __ b(ne, &call_runtime_);
10344  // Get the first of the two strings and load its instance type.
10345  __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
10346  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
10347  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
10348  // If the first cons component is also non-flat, then go to runtime.
10349  STATIC_ASSERT(kSeqStringTag == 0);
10350  __ tst(result_, Operand(kStringRepresentationMask));
10351  __ b(nz, &call_runtime_);
10352
10353  // Check for 1-byte or 2-byte string.
10354  __ bind(&flat_string);
10355  STATIC_ASSERT(kAsciiStringTag != 0);
10356  __ tst(result_, Operand(kStringEncodingMask));
10357  __ b(nz, &ascii_string);
10358
10359  // 2-byte string.
10360  // Load the 2-byte character code into the result register. We can
10361  // add without shifting since the smi tag size is the log2 of the
10362  // number of bytes in a two-byte character.
10363  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
10364  __ add(scratch_, object_, Operand(scratch_));
10365  __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
10366  __ jmp(&got_char_code);
10367
10368  // ASCII string.
10369  // Load the byte into the result register.
10370  __ bind(&ascii_string);
10371  __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
10372  __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
10373
10374  __ bind(&got_char_code);
10375  __ mov(result_, Operand(result_, LSL, kSmiTagSize));
10376  __ bind(&exit_);
10377}
10378
10379
10380void StringCharCodeAtGenerator::GenerateSlow(
10381    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
10382  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
10383
10384  // Index is not a smi.
10385  __ bind(&index_not_smi_);
10386  // If index is a heap number, try converting it to an integer.
10387  __ CheckMap(index_,
10388              scratch_,
10389              Heap::kHeapNumberMapRootIndex,
10390              index_not_number_,
10391              true);
10392  call_helper.BeforeCall(masm);
10393  __ Push(object_, index_);
10394  __ push(index_);  // Consumed by runtime conversion function.
10395  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
10396    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
10397  } else {
10398    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
10399    // NumberToSmi discards numbers that are not exact integers.
10400    __ CallRuntime(Runtime::kNumberToSmi, 1);
10401  }
10402  if (!scratch_.is(r0)) {
10403    // Save the conversion result before the pop instructions below
10404    // have a chance to overwrite it.
10405    __ mov(scratch_, r0);
10406  }
10407  __ pop(index_);
10408  __ pop(object_);
10409  // Reload the instance type.
10410  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
10411  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
10412  call_helper.AfterCall(masm);
10413  // If index is still not a smi, it must be out of range.
10414  __ BranchOnNotSmi(scratch_, index_out_of_range_);
10415  // Otherwise, return to the fast path.
10416  __ jmp(&got_smi_index_);
10417
10418  // Call runtime. We get here when the receiver is a string and the
  // index is a number, but the code for getting the actual character
10420  // is too complex (e.g., when the string needs to be flattened).
10421  __ bind(&call_runtime_);
10422  call_helper.BeforeCall(masm);
10423  __ Push(object_, index_);
10424  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
10425  if (!result_.is(r0)) {
10426    __ mov(result_, r0);
10427  }
10428  call_helper.AfterCall(masm);
10429  __ jmp(&exit_);
10430
10431  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
10432}
10433
10434
10435// -------------------------------------------------------------------------
10436// StringCharFromCodeGenerator
10437
10438void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
10439  // Fast case of Heap::LookupSingleCharacterStringFromCode.
10440  STATIC_ASSERT(kSmiTag == 0);
10441  STATIC_ASSERT(kSmiShiftSize == 0);
10442  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
10443  __ tst(code_,
10444         Operand(kSmiTagMask |
10445                 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
10446  __ b(nz, &slow_case_);
10447
10448  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
10449  // At this point code register contains smi tagged ascii char code.
10450  STATIC_ASSERT(kSmiTag == 0);
10451  __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
10452  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
10453  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
10454  __ cmp(result_, Operand(ip));
10455  __ b(eq, &slow_case_);
10456  __ bind(&exit_);
10457}
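
// In C++ terms the fast case above is roughly (a sketch; "cache" stands for
// the single_character_string_cache root array loaded above):
//
//   if (!code->IsSmi() ||
//       Smi::cast(code)->value() > String::kMaxAsciiCharCode) goto slow_case;
//   Object* result = cache->get(Smi::cast(code)->value());
//   if (result->IsUndefined()) goto slow_case;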
10458
10459
10460void StringCharFromCodeGenerator::GenerateSlow(
10461    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
10462  __ Abort("Unexpected fallthrough to CharFromCode slow case");
10463
10464  __ bind(&slow_case_);
10465  call_helper.BeforeCall(masm);
10466  __ push(code_);
10467  __ CallRuntime(Runtime::kCharFromCode, 1);
10468  if (!result_.is(r0)) {
10469    __ mov(result_, r0);
10470  }
10471  call_helper.AfterCall(masm);
10472  __ jmp(&exit_);
10473
10474  __ Abort("Unexpected fallthrough from CharFromCode slow case");
10475}
10476
10477
10478// -------------------------------------------------------------------------
10479// StringCharAtGenerator
10480
10481void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
10482  char_code_at_generator_.GenerateFast(masm);
10483  char_from_code_generator_.GenerateFast(masm);
10484}
10485
10486
10487void StringCharAtGenerator::GenerateSlow(
10488    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
10489  char_code_at_generator_.GenerateSlow(masm, call_helper);
10490  char_from_code_generator_.GenerateSlow(masm, call_helper);
10491}
10492
10493
10494void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
10495                                          Register dest,
10496                                          Register src,
10497                                          Register count,
10498                                          Register scratch,
10499                                          bool ascii) {
10500  Label loop;
10501  Label done;
10502  // This loop just copies one character at a time, as it is only used for very
10503  // short strings.
10504  if (!ascii) {
10505    __ add(count, count, Operand(count), SetCC);
10506  } else {
10507    __ cmp(count, Operand(0));
10508  }
10509  __ b(eq, &done);
10510
10511  __ bind(&loop);
10512  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
  // Perform sub between load and dependent store to give the load time to
  // complete.
10515  __ sub(count, count, Operand(1), SetCC);
10516  __ strb(scratch, MemOperand(dest, 1, PostIndex));
  // Loop unless this was the last iteration.
10518  __ b(gt, &loop);
10519
10520  __ bind(&done);
10521}
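
// In C++ terms the loop above is simply (a sketch; for two-byte strings
// "count" has already been doubled, so the copy is still byte-wise):
//
//   while (count-- > 0) *dest++ = *src++;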
10522
10523
10524enum CopyCharactersFlags {
10525  COPY_ASCII = 1,
10526  DEST_ALWAYS_ALIGNED = 2
10527};
10528
10529
10530void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
10531                                              Register dest,
10532                                              Register src,
10533                                              Register count,
10534                                              Register scratch1,
10535                                              Register scratch2,
10536                                              Register scratch3,
10537                                              Register scratch4,
10538                                              Register scratch5,
10539                                              int flags) {
10540  bool ascii = (flags & COPY_ASCII) != 0;
10541  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
10542
10543  if (dest_always_aligned && FLAG_debug_code) {
10544    // Check that destination is actually word aligned if the flag says
10545    // that it is.
10546    __ tst(dest, Operand(kPointerAlignmentMask));
10547    __ Check(eq, "Destination of copy not aligned.");
10548  }
10549
10550  const int kReadAlignment = 4;
10551  const int kReadAlignmentMask = kReadAlignment - 1;
10552  // Ensure that reading an entire aligned word containing the last character
10553  // of a string will not read outside the allocated area (because we pad up
10554  // to kObjectAlignment).
10555  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
10556  // Assumes word reads and writes are little endian.
10557  // Nothing to do for zero characters.
10558  Label done;
10559  if (!ascii) {
10560    __ add(count, count, Operand(count), SetCC);
10561  } else {
10562    __ cmp(count, Operand(0));
10563  }
10564  __ b(eq, &done);
10565
10566  // Assume that you cannot read (or write) unaligned.
10567  Label byte_loop;
10568  // Must copy at least eight bytes, otherwise just do it one byte at a time.
10569  __ cmp(count, Operand(8));
10570  __ add(count, dest, Operand(count));
10571  Register limit = count;  // Read until src equals this.
10572  __ b(lt, &byte_loop);
10573
10574  if (!dest_always_aligned) {
10575    // Align dest by byte copying. Copies between zero and three bytes.
10576    __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
10577    Label dest_aligned;
10578    __ b(eq, &dest_aligned);
10579    __ cmp(scratch4, Operand(2));
10580    __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
10581    __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
10582    __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
10583    __ strb(scratch1, MemOperand(dest, 1, PostIndex));
10584    __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
10585    __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
10586    __ bind(&dest_aligned);
10587  }
10588
10589  Label simple_loop;
10590
10591  __ sub(scratch4, dest, Operand(src));
10592  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
10593  __ b(eq, &simple_loop);
  // The shift register holds the number of bits of a source word that
  // must be combined with bits from the next source word in order
  // to create a destination word.
10597
10598  // Complex loop for src/dst that are not aligned the same way.
10599  {
10600    Label loop;
10601    __ mov(scratch4, Operand(scratch4, LSL, 3));
10602    Register left_shift = scratch4;
10603    __ and_(src, src, Operand(~3));  // Round down to load previous word.
10604    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
    // Store the "shift" most significant bits of scratch in the least
    // significant bits (i.e., shift down by (32-shift)).
10607    __ rsb(scratch2, left_shift, Operand(32));
10608    Register right_shift = scratch2;
10609    __ mov(scratch1, Operand(scratch1, LSR, right_shift));
10610
10611    __ bind(&loop);
10612    __ ldr(scratch3, MemOperand(src, 4, PostIndex));
10613    __ sub(scratch5, limit, Operand(dest));
10614    __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
10615    __ str(scratch1, MemOperand(dest, 4, PostIndex));
10616    __ mov(scratch1, Operand(scratch3, LSR, right_shift));
10617    // Loop if four or more bytes left to copy.
    // Compare to eight, because we did the subtraction before increasing dest.
10619    __ sub(scratch5, scratch5, Operand(8), SetCC);
10620    __ b(ge, &loop);
10621  }
  // There are now between zero and three bytes left to copy (the negative of
  // that number is in scratch5), and between one and three bytes already read
  // into scratch1 (eight times that number is in scratch4). We may have read
  // past the end of the string, but because objects are aligned, we have not
  // read past the end of the object.
10627  // Find the minimum of remaining characters to move and preloaded characters
10628  // and write those as bytes.
10629  __ add(scratch5, scratch5, Operand(4), SetCC);
10630  __ b(eq, &done);
10631  __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
  // Move the minimum of bytes read and bytes left to copy to scratch5.
  __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
  // Between one and three (value in scratch5) characters already read into
  // scratch1 ready to write.
10636  __ cmp(scratch5, Operand(2));
10637  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
10638  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
10639  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
10640  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
10641  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
10642  // Copy any remaining bytes.
10643  __ b(&byte_loop);
10644
10645  // Simple loop.
10646  // Copy words from src to dst, until less than four bytes left.
10647  // Both src and dest are word aligned.
10648  __ bind(&simple_loop);
10649  {
10650    Label loop;
10651    __ bind(&loop);
10652    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
10653    __ sub(scratch3, limit, Operand(dest));
10654    __ str(scratch1, MemOperand(dest, 4, PostIndex));
    // Compare to 8, not 4, because we do the subtraction before increasing
    // dest.
10657    __ cmp(scratch3, Operand(8));
10658    __ b(ge, &loop);
10659  }
10660
10661  // Copy bytes from src to dst until dst hits limit.
10662  __ bind(&byte_loop);
10663  __ cmp(dest, Operand(limit));
10664  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
10665  __ b(ge, &done);
10666  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
10667  __ b(&byte_loop);
10668
10669  __ bind(&done);
10670}
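
// The misaligned case above is a classic funnel-shift word copy. A C++ sketch
// of its core loop (assuming little-endian 32-bit words):
//
//   uint32_t carry = *src_word++ >> right_shift;  // Preloaded bits.
//   while (words_left-- > 0) {
//     uint32_t next = *src_word++;
//     *dest_word++ = carry | (next << left_shift);
//     carry = next >> right_shift;
//   }
//   // The leftover "carry" bytes and any tail are then stored byte by byte.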
10671
10672
10673void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
10674                                                        Register c1,
10675                                                        Register c2,
10676                                                        Register scratch1,
10677                                                        Register scratch2,
10678                                                        Register scratch3,
10679                                                        Register scratch4,
10680                                                        Register scratch5,
10681                                                        Label* not_found) {
10682  // Register scratch3 is the general scratch register in this function.
10683  Register scratch = scratch3;
10684
  // Make sure that both characters are not digits as such strings have a
  // different hash algorithm. Don't try to look for these in the symbol table.
10687  Label not_array_index;
10688  __ sub(scratch, c1, Operand(static_cast<int>('0')));
10689  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
10690  __ b(hi, &not_array_index);
10691  __ sub(scratch, c2, Operand(static_cast<int>('0')));
10692  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
10693
  // If the check failed, combine both characters into a single halfword.
  // This is required by the contract of the method: code at the
  // not_found branch expects this combination in the c1 register.
10697  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
10698  __ b(ls, not_found);
10699
10700  __ bind(&not_array_index);
10701  // Calculate the two character string hash.
10702  Register hash = scratch1;
10703  StringHelper::GenerateHashInit(masm, hash, c1);
10704  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
10705  StringHelper::GenerateHashGetHash(masm, hash);
10706
10707  // Collect the two characters in a register.
10708  Register chars = c1;
10709  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
10710
10711  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
10712  // hash:  hash of two character string.
10713
10714  // Load symbol table
10715  // Load address of first element of the symbol table.
10716  Register symbol_table = c2;
10717  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
10718
10719  // Load undefined value
10720  Register undefined = scratch4;
10721  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
10722
10723  // Calculate capacity mask from the symbol table capacity.
10724  Register mask = scratch2;
10725  __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
10726  __ mov(mask, Operand(mask, ASR, 1));
10727  __ sub(mask, mask, Operand(1));
10728
10729  // Calculate untagged address of the first element of the symbol table.
10730  Register first_symbol_table_element = symbol_table;
10731  __ add(first_symbol_table_element, symbol_table,
10732         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
10733
10734  // Registers
10735  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
10736  // hash:  hash of two character string
10737  // mask:  capacity mask
10738  // first_symbol_table_element: address of the first element of
10739  //                             the symbol table
10740  // scratch: -
10741
10742  // Perform a number of probes in the symbol table.
10743  static const int kProbes = 4;
10744  Label found_in_symbol_table;
10745  Label next_probe[kProbes];
  Register candidate = scratch5;  // Scratch register contains candidate.
  for (int i = 0; i < kProbes; i++) {
10748
10749    // Calculate entry in symbol table.
10750    if (i > 0) {
10751      __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
10752    } else {
10753      __ mov(candidate, hash);
10754    }
10755
10756    __ and_(candidate, candidate, Operand(mask));
10757
    // Load the entry from the symbol table.
10759    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
10760    __ ldr(candidate,
10761           MemOperand(first_symbol_table_element,
10762                      candidate,
10763                      LSL,
10764                      kPointerSizeLog2));
10765
10766    // If entry is undefined no string with this hash can be found.
10767    __ cmp(candidate, undefined);
10768    __ b(eq, not_found);
10769
10770    // If length is not 2 the string is not a candidate.
10771    __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
10772    __ cmp(scratch, Operand(Smi::FromInt(2)));
10773    __ b(ne, &next_probe[i]);
10774
10775    // Check that the candidate is a non-external ascii string.
10776    __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
10777    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
10778    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
10779                                              &next_probe[i]);
10780
10781    // Check if the two characters match.
10782    // Assumes that word load is little endian.
10783    __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
10784    __ cmp(chars, scratch);
10785    __ b(eq, &found_in_symbol_table);
10786    __ bind(&next_probe[i]);
10787  }
10788
10789  // No matching 2 character string found by probing.
10790  __ jmp(not_found);
10791
  // The candidate register contains the result when we fall through to here.
  Register result = candidate;
10794  __ bind(&found_in_symbol_table);
10795  __ Move(r0, result);
10796}
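
// A C++ sketch of the probing scheme above (IsSeqAsciiString, Length and
// FirstTwoBytes are shorthand for the checks emitted above, not real
// helpers):
//
//   for (int i = 0; i < kProbes; i++) {
//     int entry = (hash + SymbolTable::GetProbeOffset(i)) & mask;
//     Object* candidate = elements[entry];
//     if (candidate == undefined) return not_found;  // Hash not present.
//     if (IsSeqAsciiString(candidate) && Length(candidate) == 2 &&
//         FirstTwoBytes(candidate) == chars) return candidate;
//   }
//   return not_found;  // Let the runtime do the full lookup.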
10797
10798
10799void StringHelper::GenerateHashInit(MacroAssembler* masm,
10800                                    Register hash,
10801                                    Register character) {
10802  // hash = character + (character << 10);
10803  __ add(hash, character, Operand(character, LSL, 10));
10804  // hash ^= hash >> 6;
10805  __ eor(hash, hash, Operand(hash, ASR, 6));
10806}
10807
10808
10809void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
10810                                            Register hash,
10811                                            Register character) {
10812  // hash += character;
10813  __ add(hash, hash, Operand(character));
10814  // hash += hash << 10;
10815  __ add(hash, hash, Operand(hash, LSL, 10));
10816  // hash ^= hash >> 6;
10817  __ eor(hash, hash, Operand(hash, ASR, 6));
10818}
10819
10820
10821void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
10822                                       Register hash) {
10823  // hash += hash << 3;
10824  __ add(hash, hash, Operand(hash, LSL, 3));
10825  // hash ^= hash >> 11;
10826  __ eor(hash, hash, Operand(hash, ASR, 11));
10827  // hash += hash << 15;
10828  __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
10829
  // if (hash == 0) hash = 27;
  __ mov(hash, Operand(27), LeaveCC, eq);
10832}
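
// Taken together, the three hash helpers above compute a one-at-a-time style
// string hash. A C++ sketch of the same arithmetic (assuming 32-bit unsigned
// operations):
//
//   uint32_t hash = chars[0] + (chars[0] << 10);  // GenerateHashInit.
//   hash ^= hash >> 6;
//   for (int i = 1; i < length; i++) {            // GenerateHashAddCharacter.
//     hash += chars[i];
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;                            // GenerateHashGetHash.
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   if (hash == 0) hash = 27;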
10833
10834
10835void SubStringStub::Generate(MacroAssembler* masm) {
10836  Label runtime;
10837
10838  // Stack frame on entry.
10839  //  lr: return address
10840  //  sp[0]: to
10841  //  sp[4]: from
10842  //  sp[8]: string
10843
10844  // This stub is called from the native-call %_SubString(...), so
10845  // nothing can be assumed about the arguments. It is tested that:
10846  //  "string" is a sequential string,
10847  //  both "from" and "to" are smis, and
10848  //  0 <= from <= to <= string.length.
10849  // If any of these assumptions fail, we call the runtime system.
10850
10851  static const int kToOffset = 0 * kPointerSize;
10852  static const int kFromOffset = 1 * kPointerSize;
10853  static const int kStringOffset = 2 * kPointerSize;
10854
10855
10856  // Check bounds and smi-ness.
10857  __ ldr(r7, MemOperand(sp, kToOffset));
10858  __ ldr(r6, MemOperand(sp, kFromOffset));
10859  STATIC_ASSERT(kSmiTag == 0);
10860  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
10861  // I.e., arithmetic shift right by one un-smi-tags.
10862  __ mov(r2, Operand(r7, ASR, 1), SetCC);
10863  __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
  // If either r7 ("to") or r6 ("from") had the smi tag bit set, then the
  // carry flag is set now.
10865  __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
10866  __ b(mi, &runtime);  // From is negative.
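  // E.g. the integer 3 is stored as the smi 6 (tag bit 0 clear), so "ASR #1"
  // recovers 3 and shifts the clear tag bit into the carry flag, while a heap
  // object pointer has the tag bit set and therefore sets the carry.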
10867
10868  __ sub(r2, r2, Operand(r3), SetCC);
10869  __ b(mi, &runtime);  // Fail if from > to.
10870  // Special handling of sub-strings of length 1 and 2. One character strings
10871  // are handled in the runtime system (looked up in the single character
  // cache). Two character strings are looked up in the symbol table.
10873  __ cmp(r2, Operand(2));
10874  __ b(lt, &runtime);
10875
10876  // r2: length
  // r3: from index (untagged smi)
10878  // r6: from (smi)
10879  // r7: to (smi)
10880
10881  // Make sure first argument is a sequential (or flat) string.
10882  __ ldr(r5, MemOperand(sp, kStringOffset));
10883  STATIC_ASSERT(kSmiTag == 0);
10884  __ tst(r5, Operand(kSmiTagMask));
10885  __ b(eq, &runtime);
10886  Condition is_string = masm->IsObjectStringType(r5, r1);
10887  __ b(NegateCondition(is_string), &runtime);
10888
10889  // r1: instance type
10890  // r2: length
  // r3: from index (untagged smi)
10892  // r5: string
10893  // r6: from (smi)
10894  // r7: to (smi)
10895  Label seq_string;
10896  __ and_(r4, r1, Operand(kStringRepresentationMask));
10897  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
10898  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
10899  __ cmp(r4, Operand(kConsStringTag));
10900  __ b(gt, &runtime);  // External strings go to runtime.
10901  __ b(lt, &seq_string);  // Sequential strings are handled directly.
10902
10903  // Cons string. Try to recurse (once) on the first substring.
10904  // (This adds a little more generality than necessary to handle flattened
10905  // cons strings, but not much).
10906  __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
10907  __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
10908  __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
10909  __ tst(r1, Operand(kStringRepresentationMask));
10910  STATIC_ASSERT(kSeqStringTag == 0);
10911  __ b(ne, &runtime);  // Cons and External strings go to runtime.
10912
  // Definitely a sequential string.
10914  __ bind(&seq_string);
10915
10916  // r1: instance type.
10917  // r2: length
  // r3: from index (untagged smi)
10919  // r5: string
10920  // r6: from (smi)
10921  // r7: to (smi)
10922  __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
10923  __ cmp(r4, Operand(r7));
10924  __ b(lt, &runtime);  // Fail if to > length.
10925
10926  // r1: instance type.
10927  // r2: result string length.
  // r3: from index (untagged smi)
10929  // r5: string.
10930  // r6: from offset (smi)
10931  // Check for flat ascii string.
10932  Label non_ascii_flat;
10933  __ tst(r1, Operand(kStringEncodingMask));
10934  STATIC_ASSERT(kTwoByteStringTag == 0);
10935  __ b(eq, &non_ascii_flat);
10936
10937  Label result_longer_than_two;
10938  __ cmp(r2, Operand(2));
10939  __ b(gt, &result_longer_than_two);
10940
10941  // Sub string of length 2 requested.
10942  // Get the two characters forming the sub string.
10943  __ add(r5, r5, Operand(r3));
10944  __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
10945  __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
10946
  // Try to look up the two character string in the symbol table.
10948  Label make_two_character_string;
10949  StringHelper::GenerateTwoCharacterSymbolTableProbe(
10950      masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
10951  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
10952  __ add(sp, sp, Operand(3 * kPointerSize));
10953  __ Ret();
10954
10955  // r2: result string length.
10956  // r3: two characters combined into halfword in little endian byte order.
10957  __ bind(&make_two_character_string);
10958  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
10959  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
10960  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
10961  __ add(sp, sp, Operand(3 * kPointerSize));
10962  __ Ret();
10963
10964  __ bind(&result_longer_than_two);
10965
10966  // Allocate the result.
10967  __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
10968
10969  // r0: result string.
10970  // r2: result string length.
10971  // r5: string.
10972  // r6: from offset (smi)
10973  // Locate first character of result.
10974  __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
10975  // Locate 'from' character of string.
10976  __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
10977  __ add(r5, r5, Operand(r6, ASR, 1));
10978
10979  // r0: result string.
10980  // r1: first character of result string.
10981  // r2: result string length.
10982  // r5: first character of sub string to copy.
10983  STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
10984  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
10985                                           COPY_ASCII | DEST_ALWAYS_ALIGNED);
10986  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
10987  __ add(sp, sp, Operand(3 * kPointerSize));
10988  __ Ret();
10989
10990  __ bind(&non_ascii_flat);
10991  // r2: result string length.
10992  // r5: string.
10993  // r6: from offset (smi)
10994  // Check for flat two byte string.
10995
10996  // Allocate the result.
10997  __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
10998
10999  // r0: result string.
11000  // r2: result string length.
11001  // r5: string.
11002  // Locate first character of result.
11003  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
11004  // Locate 'from' character of string.
  __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // As "from" is a smi it is already 2 times the character index, which
  // matches the size of a two byte character.
11008  __ add(r5, r5, Operand(r6));
11009
11010  // r0: result string.
11011  // r1: first character of result.
11012  // r2: result length.
11013  // r5: first character of string to copy.
11014  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
11015  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
11016                                           DEST_ALWAYS_ALIGNED);
11017  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
11018  __ add(sp, sp, Operand(3 * kPointerSize));
11019  __ Ret();
11020
11021  // Just jump to runtime to create the sub string.
11022  __ bind(&runtime);
11023  __ TailCallRuntime(Runtime::kSubString, 3, 1);
11024}
11025
11026
11027void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
11028                                                        Register left,
11029                                                        Register right,
11030                                                        Register scratch1,
11031                                                        Register scratch2,
11032                                                        Register scratch3,
11033                                                        Register scratch4) {
11034  Label compare_lengths;
11035  // Find minimum length and length difference.
11036  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
11037  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
11038  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
11039  Register length_delta = scratch3;
11040  __ mov(scratch1, scratch2, LeaveCC, gt);
11041  Register min_length = scratch1;
11042  STATIC_ASSERT(kSmiTag == 0);
11043  __ tst(min_length, Operand(min_length));
11044  __ b(eq, &compare_lengths);
11045
11046  // Untag smi.
11047  __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
11048
11049  // Setup registers so that we only need to increment one register
11050  // in the loop.
11051  __ add(scratch2, min_length,
11052         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
11053  __ add(left, left, Operand(scratch2));
11054  __ add(right, right, Operand(scratch2));
  // Registers left and right point to the min_length character of the strings.
11056  __ rsb(min_length, min_length, Operand(-1));
11057  Register index = min_length;
11058  // Index starts at -min_length.
11059
11060  {
11061    // Compare loop.
11062    Label loop;
11063    __ bind(&loop);
11064    // Compare characters.
11065    __ add(index, index, Operand(1), SetCC);
11066    __ ldrb(scratch2, MemOperand(left, index), ne);
11067    __ ldrb(scratch4, MemOperand(right, index), ne);
11068    // Skip to compare lengths with eq condition true.
11069    __ b(eq, &compare_lengths);
11070    __ cmp(scratch2, scratch4);
11071    __ b(eq, &loop);
11072    // Fallthrough with eq condition false.
11073  }
  // Compare lengths - strings up to min-length are equal.
11075  __ bind(&compare_lengths);
11076  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
11077  // Use zero length_delta as result.
11078  __ mov(r0, Operand(length_delta), SetCC, eq);
11079  // Fall through to here if characters compare not-equal.
11080  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
11081  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
11082  __ Ret();
11083}
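
// A C++ sketch of the comparison implemented above:
//
//   int CompareFlat(const char* l, int llen, const char* r, int rlen) {
//     int min_length = llen < rlen ? llen : rlen;
//     for (int i = 0; i < min_length; i++) {
//       if (l[i] != r[i]) return l[i] < r[i] ? LESS : GREATER;
//     }
//     // Characters agree up to min_length: the shorter string is less.
//     return llen == rlen ? EQUAL : (llen < rlen ? LESS : GREATER);
//   }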
11084
11085
11086void StringCompareStub::Generate(MacroAssembler* masm) {
11087  Label runtime;
11088
11089  // Stack frame on entry.
11090  //  sp[0]: right string
11091  //  sp[4]: left string
11092  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // left
11093  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // right
11094
11095  Label not_same;
11096  __ cmp(r0, r1);
11097  __ b(ne, &not_same);
11098  STATIC_ASSERT(EQUAL == 0);
11099  STATIC_ASSERT(kSmiTag == 0);
11100  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
11101  __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
11102  __ add(sp, sp, Operand(2 * kPointerSize));
11103  __ Ret();
11104
11105  __ bind(&not_same);
11106
11107  // Check that both objects are sequential ascii strings.
11108  __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
11109
11110  // Compare flat ascii strings natively. Remove arguments from stack first.
11111  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
11112  __ add(sp, sp, Operand(2 * kPointerSize));
11113  GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
11114
11115  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
11116  // tagged as a small integer.
11117  __ bind(&runtime);
11118  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
11119}
11120
11121
11122void StringAddStub::Generate(MacroAssembler* masm) {
11123  Label string_add_runtime;
11124  // Stack on entry:
11125  // sp[0]: second argument.
11126  // sp[4]: first argument.
11127
11128  // Load the two arguments.
11129  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
11130  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
11131
11132  // Make sure that both arguments are strings if not known in advance.
11133  if (string_check_) {
11134    STATIC_ASSERT(kSmiTag == 0);
11135    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
11136    // Load instance types.
11137    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
11138    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
11139    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
11140    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
11141    STATIC_ASSERT(kStringTag == 0);
11142    // If either is not a string, go to runtime.
11143    __ tst(r4, Operand(kIsNotStringMask));
11144    __ tst(r5, Operand(kIsNotStringMask), eq);
11145    __ b(ne, &string_add_runtime);
11146  }
11147
11148  // Both arguments are strings.
11149  // r0: first string
11150  // r1: second string
11151  // r4: first string instance type (if string_check_)
11152  // r5: second string instance type (if string_check_)
11153  {
11154    Label strings_not_empty;
11155    // Check if either of the strings are empty. In that case return the other.
11156    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
11157    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
11158    STATIC_ASSERT(kSmiTag == 0);
11159    __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
11160    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
11161    STATIC_ASSERT(kSmiTag == 0);
    // Else test if second string is empty.
11163    __ cmp(r3, Operand(Smi::FromInt(0)), ne);
11164    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
11165
11166    __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
11167    __ add(sp, sp, Operand(2 * kPointerSize));
11168    __ Ret();
11169
11170    __ bind(&strings_not_empty);
11171  }
11172
11173  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
11174  __ mov(r3, Operand(r3, ASR, kSmiTagSize));
11175  // Both strings are non-empty.
11176  // r0: first string
11177  // r1: second string
11178  // r2: length of first string
11179  // r3: length of second string
11180  // r4: first string instance type (if string_check_)
11181  // r5: second string instance type (if string_check_)
11182  // Look at the length of the result of adding the two strings.
11183  Label string_add_flat_result, longer_than_two;
11184  // Adding two lengths can't overflow.
11185  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
11186  __ add(r6, r2, Operand(r3));
  // Handle adding two one character strings specially: look the combined
  // string up in the symbol table instead of always allocating a new one.
11189  __ cmp(r6, Operand(2));
11190  __ b(ne, &longer_than_two);
11191
11192  // Check that both strings are non-external ascii strings.
11193  if (!string_check_) {
11194    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
11195    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
11196    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
11197    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
11198  }
11199  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
11200                                                  &string_add_runtime);
11201
  // Get the two characters forming the new string (the first character of
  // each argument).
11203  __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
11204  __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
11205
  // Try to look up the two character string in the symbol table. If it is
  // not found just allocate a new one.
11208  Label make_two_character_string;
11209  StringHelper::GenerateTwoCharacterSymbolTableProbe(
11210      masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
11211  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
11212  __ add(sp, sp, Operand(2 * kPointerSize));
11213  __ Ret();
11214
11215  __ bind(&make_two_character_string);
  // Resulting string has length 2 and the first chars of the two strings
  // are combined into a single halfword in the r2 register.
  // So we can fill the resulting string with a single halfword store
  // instruction (which assumes that the processor is in little endian mode).
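  // E.g. for "a" + "b": r2 holds 'a' | ('b' << 8) == 0x6261, and the halfword
  // store below writes the bytes 0x61, 0x62 ("ab") in order.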
11221  __ mov(r6, Operand(2));
11222  __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
11223  __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
11224  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
11225  __ add(sp, sp, Operand(2 * kPointerSize));
11226  __ Ret();
11227
11228  __ bind(&longer_than_two);
11229  // Check if resulting string will be flat.
11230  __ cmp(r6, Operand(String::kMinNonFlatLength));
11231  __ b(lt, &string_add_flat_result);
11232  // Handle exceptionally long strings in the runtime system.
11233  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
11234  ASSERT(IsPowerOf2(String::kMaxLength + 1));
  // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
11236  __ cmp(r6, Operand(String::kMaxLength + 1));
11237  __ b(hs, &string_add_runtime);
11238
11239  // If result is not supposed to be flat, allocate a cons string object.
11240  // If both strings are ascii the result is an ascii cons string.
11241  if (!string_check_) {
11242    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
11243    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
11244    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
11245    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
11246  }
11247  Label non_ascii, allocated, ascii_data;
11248  STATIC_ASSERT(kTwoByteStringTag == 0);
11249  __ tst(r4, Operand(kStringEncodingMask));
11250  __ tst(r5, Operand(kStringEncodingMask), ne);
11251  __ b(eq, &non_ascii);
11252
11253  // Allocate an ASCII cons string.
11254  __ bind(&ascii_data);
11255  __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
11256  __ bind(&allocated);
11257  // Fill the fields of the cons string.
11258  __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
11259  __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
11260  __ mov(r0, Operand(r7));
11261  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
11262  __ add(sp, sp, Operand(2 * kPointerSize));
11263  __ Ret();
11264
11265  __ bind(&non_ascii);
11266  // At least one of the strings is two-byte. Check whether it happens
11267  // to contain only ascii characters.
11268  // r4: first instance type.
11269  // r5: second instance type.
11270  __ tst(r4, Operand(kAsciiDataHintMask));
11271  __ tst(r5, Operand(kAsciiDataHintMask), ne);
11272  __ b(ne, &ascii_data);
11273  __ eor(r4, r4, Operand(r5));
11274  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
11275  __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
11276  __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
11277  __ b(eq, &ascii_data);
11278
11279  // Allocate a two byte cons string.
11280  __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
11281  __ jmp(&allocated);
11282
11283  // Handle creating a flat result. First check that both strings are
11284  // sequential and that they have the same encoding.
11285  // r0: first string
11286  // r1: second string
11287  // r2: length of first string
11288  // r3: length of second string
11289  // r4: first string instance type (if string_check_)
11290  // r5: second string instance type (if string_check_)
11291  // r6: sum of lengths.
11292  __ bind(&string_add_flat_result);
11293  if (!string_check_) {
11294    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
11295    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
11296    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
11297    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
11298  }
11299  // Check that both strings are sequential.
11300  STATIC_ASSERT(kSeqStringTag == 0);
11301  __ tst(r4, Operand(kStringRepresentationMask));
11302  __ tst(r5, Operand(kStringRepresentationMask), eq);
11303  __ b(ne, &string_add_runtime);
11304  // Now check if both strings have the same encoding (ASCII/Two-byte).
11305  // r0: first string.
11306  // r1: second string.
11307  // r2: length of first string.
11308  // r3: length of second string.
  // r6: sum of lengths.
11310  Label non_ascii_string_add_flat_result;
11311  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
11312  __ eor(r7, r4, Operand(r5));
11313  __ tst(r7, Operand(kStringEncodingMask));
11314  __ b(ne, &string_add_runtime);
11315  // And see if it's ASCII or two-byte.
11316  __ tst(r4, Operand(kStringEncodingMask));
11317  __ b(eq, &non_ascii_string_add_flat_result);
11318
11319  // Both strings are sequential ASCII strings. We also know that they are
11320  // short (since the sum of the lengths is less than kMinNonFlatLength).
11321  // r6: length of resulting flat string
11322  __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
11323  // Locate first character of result.
11324  __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
11325  // Locate first character of first argument.
11326  __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
11327  // r0: first character of first string.
11328  // r1: second string.
11329  // r2: length of first string.
11330  // r3: length of second string.
11331  // r6: first character of result.
11332  // r7: result string.
11333  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
11334
11335  // Load second argument and locate first character.
11336  __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
11337  // r1: first character of second string.
11338  // r3: length of second string.
11339  // r6: next character of result.
11340  // r7: result string.
11341  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
11342  __ mov(r0, Operand(r7));
11343  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
11344  __ add(sp, sp, Operand(2 * kPointerSize));
11345  __ Ret();
11346
11347  __ bind(&non_ascii_string_add_flat_result);
11348  // Both strings are sequential two byte strings.
11349  // r0: first string.
11350  // r1: second string.
11351  // r2: length of first string.
11352  // r3: length of second string.
11353  // r6: sum of length of strings.
11354  __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
11355  // r0: first string.
11356  // r1: second string.
11357  // r2: length of first string.
11358  // r3: length of second string.
11359  // r7: result string.
11360
11361  // Locate first character of result.
11362  __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
11363  // Locate first character of first argument.
11364  __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
11365
11366  // r0: first character of first string.
11367  // r1: second string.
11368  // r2: length of first string.
11369  // r3: length of second string.
11370  // r6: first character of result.
11371  // r7: result string.
11372  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
11373
11374  // Locate first character of second argument.
11375  __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
11376
11377  // r1: first character of second string.
11378  // r3: length of second string.
11379  // r6: next character of result (after copy of first string).
11380  // r7: result string.
11381  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
11382
11383  __ mov(r0, Operand(r7));
11384  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
11385  __ add(sp, sp, Operand(2 * kPointerSize));
11386  __ Ret();
11387
11388  // Just jump to runtime to add the two strings.
11389  __ bind(&string_add_runtime);
11390  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
11391}
11392
11393
11394#undef __
11395
11396} }  // namespace v8::internal
11397
11398#endif  // V8_TARGET_ARCH_ARM
11399