codegen-arm.cc revision 8defd9ff6930b4e24729971a61cf7469daf119be
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "jsregexp.h"
#include "jump-target-light-inl.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
#include "virtual-frame-arm-inl.h"

namespace v8 {
namespace internal {


static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
static void MultiplyByKnownInt(MacroAssembler* masm,
                               Register source,
                               Register destination,
                               int known_int);
static bool IsEasyToMultiplyBy(int x);


#define __ ACCESS_MASM(masm_)
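// Note: the '__' shorthand expands through ACCESS_MASM to 'masm_->', so a
// line like '__ ldr(...)' emits an instruction via this code generator's
// macro assembler.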

// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

void DeferredCode::SaveRegisters() {
  // On ARM you either have a completely spilled frame or you
  // handle it yourself, but at the moment there's no automation
  // of registers and deferred code.
}


void DeferredCode::RestoreRegisters() {
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  frame_state_->frame()->AssertIsSpilled();
}


void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
}


void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterInternalFrame();
}


void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveInternalFrame();
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      previous_(owner->state()) {
  owner->set_state(this);
}


ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
                                             JumpTarget* true_target,
                                             JumpTarget* false_target)
    : CodeGenState(owner),
      true_target_(true_target),
      false_target_(false_target) {
  owner->set_state(this);
}


TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
                                           Slot* slot,
                                           TypeInfo type_info)
    : CodeGenState(owner),
      slot_(slot) {
  owner->set_state(this);
  old_type_info_ = owner->set_type_info(slot, type_info);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


TypeInfoCodeGenState::~TypeInfoCodeGenState() {
  owner()->set_type_info(slot_, old_type_info_);
}

// -------------------------------------------------------------------------
// CodeGenerator implementation.

CodeGenerator::CodeGenerator(MacroAssembler* masm)
    : deferred_(8),
      masm_(masm),
      info_(NULL),
      frame_(NULL),
      allocator_(NULL),
      cc_reg_(al),
      state_(NULL),
      loop_nesting_(0),
      type_info_(NULL),
      function_return_(JumpTarget::BIDIRECTIONAL),
      function_return_is_shadowed_(false) {
}


// Calling conventions:
// fp: caller's frame pointer
// sp: stack pointer
// r1: called JS function
// cp: callee's context

void CodeGenerator::Generate(CompilationInfo* info) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(info->function());
  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");

  // Initialize state.
  info_ = info;

  int slots = scope()->num_parameters() + scope()->num_stack_slots();
  ScopedVector<TypeInfo> type_info_array(slots);
  type_info_ = &type_info_array;

  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  cc_reg_ = al;

  // Adjust for function-level loop nesting.
  ASSERT_EQ(0, loop_nesting_);
  loop_nesting_ = info->loop_nesting();

  {
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments
    // lr: return address
    // fp: caller's frame pointer
    // sp: stack pointer
    // r1: called JS function
    // cp: callee's context
    allocator_->Initialize();

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ stop("stop-at");
    }
#endif

    if (info->mode() == CompilationInfo::PRIMARY) {
      frame_->Enter();
      // tos: code slot

      // Allocate space for locals and initialize them.  This also checks
      // for stack overflow.
      frame_->AllocateStackSlots();

      frame_->AssertIsSpilled();
      int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
      if (heap_slots > 0) {
        // Allocate local context.
        // Get outer context and create a new context based on it.
        __ ldr(r0, frame_->Function());
        frame_->EmitPush(r0);
        if (heap_slots <= FastNewContextStub::kMaximumSlots) {
          FastNewContextStub stub(heap_slots);
          frame_->CallStub(&stub, 1);
        } else {
          frame_->CallRuntime(Runtime::kNewContext, 1);
        }

#ifdef DEBUG
        JumpTarget verified_true;
        __ cmp(r0, cp);
        verified_true.Branch(eq);
        __ stop("NewContext: r0 is expected to be the same as cp");
        verified_true.Bind();
#endif
        // Update context local.
        __ str(cp, frame_->Context());
      }

      // TODO(1241774): Improve this code:
      // 1) only needed if we have a context
      // 2) no need to recompute context ptr every single time
      // 3) don't copy parameter operand code from SlotOperand!
      {
        Comment cmnt2(masm_, "[ copy context parameters into .context");
        // Note that iteration order is relevant here! If we have the same
        // parameter twice (e.g., function (x, y, x)), and that parameter
        // needs to be copied into the context, it is the value of the last
        // argument passed for that parameter that must end up in the
        // context. This is a rare case so we don't check for it; instead we
        // rely on the copying order: such a parameter is copied repeatedly
        // into the same context location and thus the last value is what is
        // seen inside the function.
        frame_->AssertIsSpilled();
        for (int i = 0; i < scope()->num_parameters(); i++) {
          Variable* par = scope()->parameter(i);
          Slot* slot = par->slot();
          if (slot != NULL && slot->type() == Slot::CONTEXT) {
            ASSERT(!scope()->is_global_scope());  // No params in global scope.
            __ ldr(r1, frame_->ParameterAt(i));
            // Loads r2 with context; used below in RecordWrite.
            __ str(r1, SlotOperand(slot, r2));
            // Load the offset into r3.
            int slot_offset =
                FixedArray::kHeaderSize + slot->index() * kPointerSize;
            __ RecordWrite(r2, Operand(slot_offset), r3, r1);
          }
        }
      }

      // Store the arguments object.  This must happen after context
      // initialization because the arguments object may be stored in
      // the context.
      if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
        StoreArgumentsObject(true);
      }

      // Initialize ThisFunction reference if present.
      if (scope()->is_function_scope() && scope()->function() != NULL) {
        frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
      }
    } else {
      // When used as the secondary compiler for splitting, r1, cp,
      // fp, and lr have been pushed on the stack.  Adjust the virtual
      // frame to match this state.
      frame_->Adjust(4);

      // Bind all the bailout labels to the beginning of the function.
      List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
      for (int i = 0; i < bailouts->length(); i++) {
        __ bind(bailouts->at(i)->label());
      }
    }

    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.SetExpectedHeight();
    function_return_is_shadowed_ = false;

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope()->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope()->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatements(info->function()->body());
    }
  }

  // Handle the return from the function.
  if (has_valid_frame()) {
    // If there is a valid frame, control flow can fall off the end of
    // the body.  In that case there is an implicit return statement.
    ASSERT(!function_return_is_shadowed_);
    frame_->PrepareForReturn();
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
    if (function_return_.is_bound()) {
      function_return_.Jump();
    } else {
      function_return_.Bind();
      GenerateReturnSequence();
    }
  } else if (function_return_.is_linked()) {
    // If the return target has dangling jumps to it, then we have not
    // yet generated the return sequence.  This can happen when (a)
    // control does not flow off the end of the body so we did not
    // compile an artificial return statement just above, and (b) there
    // are return statements in the body but (c) they are all shadowed.
    function_return_.Bind();
    GenerateReturnSequence();
  }

  // Adjust for function-level loop nesting.
  ASSERT(loop_nesting_ == info->loop_nesting());
  loop_nesting_ = 0;

  // Code generation state must be reset.
  ASSERT(!has_cc());
  ASSERT(state_ == NULL);
  ASSERT(loop_nesting() == 0);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    ProcessDeferred();
  }

  allocator_ = NULL;
  type_info_ = NULL;
}


int CodeGenerator::NumberOfSlot(Slot* slot) {
  if (slot == NULL) return kInvalidSlotNumber;
  switch (slot->type()) {
    case Slot::PARAMETER:
      return slot->index();
    case Slot::LOCAL:
      return slot->index() + scope()->num_parameters();
    default:
      break;
  }
  return kInvalidSlotNumber;
}


MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(cp));  // do not overwrite context register
      Register context = cp;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now. Get the function context.
      // (In fact this load may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we are
      // always at a function context. However it is safe to dereference
      // because the function context of a function context is itself. Before
      // deleting this load we should try to create a counter-example first,
      // though...)
      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      UNREACHABLE();
      return MemOperand(r0, 0);
  }
}


MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Register tmp,
    Register tmp2,
    JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = cp;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
  }
  // Check that last extension is NULL.
  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
  __ tst(tmp2, tmp2);
  slow->Branch(ne);
  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(tmp, slot->index());
}


// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
// condition code register and no value is pushed. If the condition code
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
                                  JumpTarget* true_target,
                                  JumpTarget* false_target,
                                  bool force_cc) {
  ASSERT(!has_cc());
  int original_height = frame_->height();

  { ConditionCodeGenState new_state(this, true_target, false_target);
    Visit(x);

    // If we hit a stack overflow, we may not have actually visited
    // the expression.  In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (e.g., a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        has_valid_frame() &&
        !has_cc() &&
        frame_->height() == original_height) {
      true_target->Jump();
    }
  }
  if (force_cc && frame_ != NULL && !has_cc()) {
    // Convert the TOS value to a boolean in the condition code register.
    ToBoolean(true_target, false_target);
  }
  ASSERT(!force_cc || !has_valid_frame() || has_cc());
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  JumpTarget true_target;
  JumpTarget false_target;
  LoadCondition(expr, &true_target, &false_target, false);

  if (has_cc()) {
    // Convert cc_reg_ into a boolean value.
    JumpTarget loaded;
    JumpTarget materialize_true;
    materialize_true.Branch(cc_reg_);
    frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
    loaded.Jump();
    materialize_true.Bind();
    frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
    loaded.Bind();
    cc_reg_ = al;
  }

  if (true_target.is_linked() || false_target.is_linked()) {
    // We have at least one condition value that has been "translated"
    // into a branch, thus it needs to be loaded explicitly.
    JumpTarget loaded;
    if (frame_ != NULL) {
      loaded.Jump();  // Don't lose the current TOS.
    }
    bool both = true_target.is_linked() && false_target.is_linked();
    // Load "true" if necessary.
    if (true_target.is_linked()) {
      true_target.Bind();
      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
    }
    // If both "true" and "false" need to be loaded jump across the code for
    // "false".
    if (both) {
      loaded.Jump();
    }
    // Load "false" if necessary.
    if (false_target.is_linked()) {
      false_target.Bind();
      frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
    }
    // A value is loaded on all paths reaching this point.
    loaded.Bind();
  }
  ASSERT(has_valid_frame());
  ASSERT(!has_cc());
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::LoadGlobal() {
  Register reg = frame_->GetTOSRegister();
  __ ldr(reg, GlobalObject());
  frame_->EmitPush(reg);
}


void CodeGenerator::LoadGlobalReceiver(Register scratch) {
  Register reg = frame_->GetTOSRegister();
  __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(reg,
         FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
  frame_->EmitPush(reg);
}


ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
  ASSERT(scope()->arguments_shadow() != NULL);
  // We don't want to do lazy arguments allocation for functions that
  // have heap-allocated contexts, because it interferes with the
  // uninitialized const tracking in the context objects.
  return (scope()->num_heap_slots() > 0)
      ? EAGER_ARGUMENTS_ALLOCATION
      : LAZY_ARGUMENTS_ALLOCATION;
}


void CodeGenerator::StoreArgumentsObject(bool initial) {
  ArgumentsAllocationMode mode = ArgumentsMode();
  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);

  Comment cmnt(masm_, "[ store arguments object");
  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
    // When using lazy arguments allocation, we store the hole value
    // as a sentinel indicating that the arguments object hasn't been
    // allocated yet.
    frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
  } else {
    frame_->SpillAll();
    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
    __ ldr(r2, frame_->Function());
    // The receiver is below the arguments, the return address, and the
    // frame pointer on the stack.
    const int kReceiverDisplacement = 2 + scope()->num_parameters();
    __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
    __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
    frame_->Adjust(3);
    __ Push(r2, r1, r0);
    frame_->CallStub(&stub, 3);
    frame_->EmitPush(r0);
  }

  Variable* arguments = scope()->arguments()->var();
  Variable* shadow = scope()->arguments_shadow()->var();
  ASSERT(arguments != NULL && arguments->slot() != NULL);
  ASSERT(shadow != NULL && shadow->slot() != NULL);
  JumpTarget done;
  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
    // We have to skip storing into the arguments slot if it has
    // already been written to. This can happen if a function
    // has a local variable named 'arguments'.
    LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
    Register arguments = frame_->PopToRegister();
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(arguments, ip);
    done.Branch(ne);
  }
  StoreToSlot(arguments->slot(), NOT_CONST_INIT);
  if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
  StoreToSlot(shadow->slot(), NOT_CONST_INIT);
}


void CodeGenerator::LoadTypeofExpression(Expression* expr) {
  // Special handling of identifiers as subexpressions of typeof.
  Variable* variable = expr->AsVariableProxy()->AsVariable();
  if (variable != NULL && !variable->is_this() && variable->is_global()) {
    // For a global variable we build the property reference
    // <global>.<variable> and perform a (regular non-contextual) property
    // load to make sure we do not get reference errors.
    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
    Literal key(variable->name());
    Property property(&global, &key, RelocInfo::kNoPosition);
    Reference ref(this, &property);
    ref.GetValue();
  } else if (variable != NULL && variable->slot() != NULL) {
    // For a variable that rewrites to a slot, we signal it is the immediate
    // subexpression of a typeof.
    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
  } else {
    // Anything else can be handled normally.
    Load(expr);
  }
}


Reference::Reference(CodeGenerator* cgen,
                     Expression* expression,
                     bool persist_after_get)
    : cgen_(cgen),
      expression_(expression),
      type_(ILLEGAL),
      persist_after_get_(persist_after_get) {
  cgen->LoadReference(this);
}


Reference::~Reference() {
  ASSERT(is_unloaded() || is_illegal());
}


void CodeGenerator::LoadReference(Reference* ref) {
  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    Load(property->obj());
    if (property->key()->IsPropertyName()) {
      ref->set_type(Reference::NAMED);
    } else {
      Load(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property.  Global variables are treated as named property references.
    if (var->is_global()) {
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->slot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    Load(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }
}


void CodeGenerator::UnloadReference(Reference* ref) {
  int size = ref->size();
  ref->set_unloaded();
  if (size == 0) return;

  // Pop a reference from the stack while preserving TOS.
  VirtualFrame::RegisterAllocationScope scope(this);
  Comment cmnt(masm_, "[ UnloadReference");
  if (size > 0) {
    Register tos = frame_->PopToRegister();
    frame_->Drop(size);
    frame_->EmitPush(tos);
  }
}


// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
                              JumpTarget* false_target) {
  // Note: The generated code snippet does not change stack variables.
  //       Only the condition code should be set.
  bool known_smi = frame_->KnownSmiAt(0);
  Register tos = frame_->PopToRegister();

  // Fast case checks

  // Check if the value is 'false'.
  if (!known_smi) {
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(tos, ip);
    false_target->Branch(eq);

    // Check if the value is 'true'.
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(tos, ip);
    true_target->Branch(eq);

    // Check if the value is 'undefined'.
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(tos, ip);
    false_target->Branch(eq);
  }

  // Compare against the smi value zero, the only smi that converts to
  // 'false' (heap pointers are never equal to zero).
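  // A sketch of the smi encoding this relies on (kSmiTag == 0,
  // kSmiTagSize == 1): an integer n is tagged as n << 1, so 5 is stored as
  // the word 10 and Smi::FromInt(0) is machine-word zero.  That is why a
  // plain cmp works here and why 'tst tos, #kSmiTagMask' below sets eq
  // exactly for smis.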
  __ cmp(tos, Operand(Smi::FromInt(0)));

  if (!known_smi) {
    false_target->Branch(eq);
    __ tst(tos, Operand(kSmiTagMask));
    true_target->Branch(eq);

    // Slow case: call the runtime.
    frame_->EmitPush(tos);
    frame_->CallRuntime(Runtime::kToBool, 1);
    // Convert the result (r0) to a condition code.
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(r0, ip);
  }

  cc_reg_ = ne;
}


void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           OverwriteMode overwrite_mode,
                                           GenerateInlineSmi inline_smi,
                                           int constant_rhs) {
  // top of virtual frame: y
  // 2nd elt. on virtual frame: x
  // result: top of virtual frame

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:
    case Token::SUB:
      if (inline_smi) {
        JumpTarget done;
        Register rhs = frame_->PopToRegister();
        Register lhs = frame_->PopToRegister(rhs);
        Register scratch = VirtualFrame::scratch0();
        __ orr(scratch, rhs, Operand(lhs));
        // Check they are both small and positive.
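        // (OR-ing merges the tag bits, so kSmiTagMask catches a non-smi on
        // either side; insisting that bits 31 and 30 are clear bounds each
        // untagged operand below 2^29, so the conditional add/sub below
        // cannot overflow the 31-bit smi range.)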
        __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
        ASSERT_EQ(0, kSmiTag);
        if (op == Token::ADD) {
          __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
        } else {
          __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
        }
        done.Branch(eq);
        GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
        frame_->SpillAll();
        frame_->CallStub(&stub, 0);
        done.Bind();
        frame_->EmitPush(r0);
        break;
      } else {
        // Fall through!
      }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
      if (inline_smi) {
        bool rhs_is_smi = frame_->KnownSmiAt(0);
        bool lhs_is_smi = frame_->KnownSmiAt(1);
        Register rhs = frame_->PopToRegister();
        Register lhs = frame_->PopToRegister(rhs);
        Register smi_test_reg;
        Condition cond;
        if (!rhs_is_smi || !lhs_is_smi) {
          if (rhs_is_smi) {
            smi_test_reg = lhs;
          } else if (lhs_is_smi) {
            smi_test_reg = rhs;
          } else {
            smi_test_reg = VirtualFrame::scratch0();
            __ orr(smi_test_reg, rhs, Operand(lhs));
          }
          // Check they are both Smis.
          __ tst(smi_test_reg, Operand(kSmiTagMask));
          cond = eq;
        } else {
          cond = al;
        }
        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
        if (op == Token::BIT_OR) {
          __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
        } else if (op == Token::BIT_AND) {
          __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
        } else {
          ASSERT(op == Token::BIT_XOR);
          ASSERT_EQ(0, kSmiTag);
          __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
        }
        if (cond != al) {
          JumpTarget done;
          done.Branch(cond);
          GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
          frame_->SpillAll();
          frame_->CallStub(&stub, 0);
          done.Bind();
        }
        frame_->EmitPush(r0);
        break;
      } else {
        // Fall through!
      }
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      Register rhs = frame_->PopToRegister();
      Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
      GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
      frame_->SpillAll();
      frame_->CallStub(&stub, 0);
      frame_->EmitPush(r0);
      break;
    }

    case Token::COMMA: {
      Register scratch = frame_->PopToRegister();
      // Simply discard left value.
      frame_->Drop();
      frame_->EmitPush(scratch);
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}


class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             int value,
                             bool reversed,
                             OverwriteMode overwrite_mode,
                             Register tos)
      : op_(op),
        value_(value),
        reversed_(reversed),
        overwrite_mode_(overwrite_mode),
        tos_register_(tos) {
    set_comment("[ DeferredInlinedSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  int value_;
  bool reversed_;
  OverwriteMode overwrite_mode_;
  Register tos_register_;
};


// On entry the non-constant side of the binary operation is in tos_register_
// and the constant smi side is nowhere.  The tos_register_ is not used by the
// virtual frame.  On exit the answer is in the tos_register_ and the virtual
// frame is unchanged.
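// For ADD and SUB the inline code applied the operation optimistically (e.g.
// for 'x + 3' it computed tos += Smi(3) and branched here on overflow or on
// a non-smi operand), so those cases below first reconstruct the original
// operands before calling the generic stub with them in r0/r1.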
void DeferredInlineSmiOperation::Generate() {
  VirtualFrame copied_frame(*frame_state()->frame());
  copied_frame.SpillAll();

  Register lhs = r1;
  Register rhs = r0;
  switch (op_) {
    case Token::ADD: {
      // Revert optimistic add.
      if (reversed_) {
        __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SUB: {
      // Revert optimistic sub.
      if (reversed_) {
        __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    // For these operations there is no optimistic operation that needs to be
    // reverted.
    case Token::MUL:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (tos_register_.is(r1)) {
        __ mov(r0, Operand(Smi::FromInt(value_)));
      } else {
        ASSERT(tos_register_.is(r0));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      }
      if (reversed_ == tos_register_.is(r1)) {
        lhs = r0;
        rhs = r1;
      }
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }

  GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
  __ CallStub(&stub);

  // The generic stub returns its value in r0, but that's not
  // necessarily what we want.  We want whatever the inlined code
  // expected, which is that the answer is in the same register as
  // the operand was.
  __ Move(tos_register_, r0);

  // The tos register was not in use for the virtual frame that we
  // came into this function with, so we can merge back to that frame
  // without trashing it.
  copied_frame.MergeTo(frame_state()->frame());
}

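// Returns whether x has at most two bits set: 'x &= x - 1' clears the lowest
// set bit, and '(x & (x - 1)) == 0' then checks that at most one bit remains.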
static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}


// Returns the index of the lowest bit set.
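// Assumes x != 0; the loops below would not terminate for x == 0.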
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}


void CodeGenerator::SmiOperation(Token::Value op,
                                 Handle<Object> value,
                                 bool reversed,
                                 OverwriteMode mode) {
  int int_value = Smi::cast(*value)->value();

  bool both_sides_are_smi = frame_->KnownSmiAt(0);

  bool something_to_inline;
  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::BIT_AND:
    case Token::BIT_OR:
    case Token::BIT_XOR: {
      something_to_inline = true;
      break;
    }
    case Token::SHL: {
      something_to_inline = (both_sides_are_smi || !reversed);
      break;
    }
    case Token::SHR:
    case Token::SAR: {
      if (reversed) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MOD: {
      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MUL: {
      if (!IsEasyToMultiplyBy(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    default: {
      something_to_inline = false;
      break;
    }
  }

  if (!something_to_inline) {
    if (!reversed) {
      // Push the rhs onto the virtual frame by putting it in a TOS register.
      Register rhs = frame_->GetTOSRegister();
      __ mov(rhs, Operand(value));
      frame_->EmitPush(rhs, TypeInfo::Smi());
      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
    } else {
      // Pop the rhs, then push lhs and rhs in the right order.  Only performs
      // at most one pop, the rest takes place in TOS registers.
      Register lhs = frame_->GetTOSRegister();    // Get reg for pushing.
      Register rhs = frame_->PopToRegister(lhs);  // Don't use lhs for this.
      __ mov(lhs, Operand(value));
      frame_->EmitPush(lhs, TypeInfo::Smi());
      TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
      frame_->EmitPush(rhs, t);
      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, kUnknownIntValue);
    }
    return;
  }

  // We move the top of stack to a register (normally no move is involved).
  Register tos = frame_->PopToRegister();
  switch (op) {
    case Token::ADD: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      __ add(tos, tos, Operand(value), SetCC);
      deferred->Branch(vs);
      if (!both_sides_are_smi) {
        __ tst(tos, Operand(kSmiTagMask));
        deferred->Branch(ne);
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      if (reversed) {
        __ rsb(tos, tos, Operand(value), SetCC);
      } else {
        __ sub(tos, tos, Operand(value), SetCC);
      }
      deferred->Branch(vs);
      if (!both_sides_are_smi) {
        __ tst(tos, Operand(kSmiTagMask));
        deferred->Branch(ne);
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      if (both_sides_are_smi) {
        switch (op) {
          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
          default: UNREACHABLE();
        }
        frame_->EmitPush(tos, TypeInfo::Smi());
      } else {
        DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
        __ tst(tos, Operand(kSmiTagMask));
        deferred->Branch(ne);
        switch (op) {
          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
          default: UNREACHABLE();
        }
        deferred->BindExit();
        TypeInfo result_type =
            (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
        frame_->EmitPush(tos, result_type);
      }
      break;
    }

    case Token::SHL:
      if (reversed) {
        ASSERT(both_sides_are_smi);
        int max_shift = 0;
        int max_result = int_value == 0 ? 1 : int_value;
        while (Smi::IsValid(max_result << 1)) {
          max_shift++;
          max_result <<= 1;
        }
        DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
        // Mask off the last 5 bits of the shift operand (rhs).  This is part
        // of the definition of shift in JS and we know we have a Smi so we
        // can safely do this.  The masked version gets passed to the
        // deferred code, but that makes no difference.
        __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
        __ cmp(tos, Operand(Smi::FromInt(max_shift)));
        deferred->Branch(ge);
        Register scratch = VirtualFrame::scratch0();
        __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Untag.
        __ mov(tos, Operand(Smi::FromInt(int_value)));    // Load constant.
        __ mov(tos, Operand(tos, LSL, scratch));          // Shift constant.
        deferred->BindExit();
        TypeInfo result = TypeInfo::Integer32();
        frame_->EmitPush(tos, result);
        break;
      }
      // Fall through!
    case Token::SHR:
    case Token::SAR: {
      ASSERT(!reversed);
      TypeInfo result = TypeInfo::Integer32();
      Register scratch = VirtualFrame::scratch0();
      Register scratch2 = VirtualFrame::scratch1();
      int shift_value = int_value & 0x1f;  // least significant 5 bits
      DeferredCode* deferred =
        new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
      uint32_t problematic_mask = kSmiTagMask;
      // For unsigned shift by zero all negative smis are problematic.
      bool skip_smi_test = both_sides_are_smi;
      if (shift_value == 0 && op == Token::SHR) {
        problematic_mask |= 0x80000000;
        skip_smi_test = false;
      }
      if (!skip_smi_test) {
        __ tst(tos, Operand(problematic_mask));
        deferred->Branch(ne);  // Go slow for problematic input.
      }
      switch (op) {
        case Token::SHL: {
          if (shift_value != 0) {
            int adjusted_shift = shift_value - kSmiTagSize;
            ASSERT(adjusted_shift >= 0);
            if (adjusted_shift != 0) {
              __ mov(scratch, Operand(tos, LSL, adjusted_shift));
              // Check that the *signed* result fits in a smi.
              __ add(scratch2, scratch, Operand(0x40000000), SetCC);
              deferred->Branch(mi);
              __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
            } else {
              // Check that the *signed* result fits in a smi.
              __ add(scratch2, tos, Operand(0x40000000), SetCC);
              deferred->Branch(mi);
              __ mov(tos, Operand(tos, LSL, kSmiTagSize));
            }
          }
          break;
        }
        case Token::SHR: {
          if (shift_value != 0) {
            __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Remove tag.
            // LSR by immediate 0 means shifting 32 bits.
            __ mov(scratch, Operand(scratch, LSR, shift_value));
            if (shift_value == 1) {
              // Check that the *unsigned* result fits in a smi: neither of
              // the two high-order bits can be set.
              // - 0x80000000: the high bit would be lost when smi tagging.
              // - 0x40000000: this number would convert to negative when
              //   smi tagging.
              // These two cases can only happen with shifts by 0 or 1 when
              // handed a valid smi.
              __ tst(scratch, Operand(0xc0000000));
              deferred->Branch(ne);
            } else {
              ASSERT(shift_value >= 2);
              result = TypeInfo::Smi();  // SHR by at least 2 gives a Smi.
            }
            __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
          }
          break;
        }
        case Token::SAR: {
          // In the ARM instruction set, ASR by immediate 0 means shifting 32
          // bits.
          if (shift_value != 0) {
            // Do the shift and the tag removal in one operation.  If the shift
            // is 31 bits (the highest possible value) then we emit the
            // instruction as a shift by 0 which means shift arithmetically by
            // 32.
            __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
            // Put tag back.
            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
            // SAR by at least 1 gives a Smi.
            result = TypeInfo::Smi();
          }
          break;
        }
        default: UNREACHABLE();
      }
      deferred->BindExit();
      frame_->EmitPush(tos, result);
      break;
    }

    case Token::MOD: {
      ASSERT(!reversed);
      ASSERT(int_value >= 2);
      ASSERT(IsPowerOf2(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned mask = (0x80000000u | kSmiTagMask);
      __ tst(tos, Operand(mask));
      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
      mask = (int_value << kSmiTagSize) - 1;
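      // For example, with int_value == 8 the mask is (8 << 1) - 1 = 0xf;
      // AND-ing keeps the three low untagged bits plus the (zero) tag bit,
      // which is Smi(x % 8) for the non-negative smis that reach this code.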
      __ and_(tos, tos, Operand(mask));
      deferred->BindExit();
      // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
      frame_->EmitPush(
          tos,
          both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
      break;
    }

    case Token::MUL: {
      ASSERT(IsEasyToMultiplyBy(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
      max_smi_that_wont_overflow <<= kSmiTagSize;
      unsigned mask = 0x80000000u;
      while ((mask & max_smi_that_wont_overflow) == 0) {
        mask |= mask >> 1;
      }
      mask |= kSmiTagMask;
      // This does a single mask that checks for a too high value in a
      // conservative way and for a non-Smi.  It also filters out negative
      // numbers, unfortunately, but since this code is inline we prefer
      // brevity to comprehensiveness.
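      // A sketch for int_value == 10: the largest safe tagged operand is
      // (Smi::kMaxValue / 10) << 1, about 0x0ccccccc, so the loop above
      // builds the mask 0xf8000000; any operand with one of the top five
      // bits set (or the tag bit) takes the deferred path.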
1326      __ tst(tos, Operand(mask));
1327      deferred->Branch(ne);
1328      MultiplyByKnownInt(masm_, tos, tos, int_value);
1329      deferred->BindExit();
1330      frame_->EmitPush(tos);
1331      break;
1332    }
1333
1334    default:
1335      UNREACHABLE();
1336      break;
1337  }
1338}
1339
1340
1341void CodeGenerator::Comparison(Condition cc,
1342                               Expression* left,
1343                               Expression* right,
1344                               bool strict) {
1345  VirtualFrame::RegisterAllocationScope scope(this);
1346
1347  if (left != NULL) Load(left);
1348  if (right != NULL) Load(right);
1349
1350  // sp[0] : y
1351  // sp[1] : x
1352  // result : cc register
1353
1354  // Strict only makes sense for equality comparisons.
1355  ASSERT(!strict || cc == eq);
1356
1357  Register lhs;
1358  Register rhs;
1359
1360  bool lhs_is_smi;
1361  bool rhs_is_smi;
1362
1363  // We load the top two stack positions into registers chosen by the virtual
1364  // frame.  This should keep the register shuffling to a minimum.
1365  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
1366  if (cc == gt || cc == le) {
1367    cc = ReverseCondition(cc);
1368    lhs_is_smi = frame_->KnownSmiAt(0);
1369    rhs_is_smi = frame_->KnownSmiAt(1);
1370    lhs = frame_->PopToRegister();
1371    rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
1372  } else {
1373    rhs_is_smi = frame_->KnownSmiAt(0);
1374    lhs_is_smi = frame_->KnownSmiAt(1);
1375    rhs = frame_->PopToRegister();
1376    lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
1377  }
1378
1379  bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
1380
1381  ASSERT(rhs.is(r0) || rhs.is(r1));
1382  ASSERT(lhs.is(r0) || lhs.is(r1));
1383
1384  JumpTarget exit;
1385
1386  if (!both_sides_are_smi) {
1387    // Now we have the two sides in r0 and r1.  We flush any other registers
1388    // because the stub doesn't know about register allocation.
1389    frame_->SpillAll();
1390    Register scratch = VirtualFrame::scratch0();
1391    Register smi_test_reg;
1392    if (lhs_is_smi) {
1393      smi_test_reg = rhs;
1394    } else if (rhs_is_smi) {
1395      smi_test_reg = lhs;
1396    } else {
1397      __ orr(scratch, lhs, Operand(rhs));
1398      smi_test_reg = scratch;
1399    }
1400    __ tst(smi_test_reg, Operand(kSmiTagMask));
1401    JumpTarget smi;
1402    smi.Branch(eq);
1403
1404    // Perform non-smi comparison by stub.
1405    // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
1406    // We call with 0 args because there are 0 on the stack.
1407    if (!rhs.is(r0)) {
1408      __ Swap(rhs, lhs, ip);
1409    }
1410
1411    CompareStub stub(cc, strict);
1412    frame_->CallStub(&stub, 0);
1413    __ cmp(r0, Operand(0));
1414    exit.Jump();
1415
1416    smi.Bind();
1417  }
1418
1419  // Do smi comparisons by pointer comparison.
1420  __ cmp(lhs, Operand(rhs));
1421
1422  exit.Bind();
1423  cc_reg_ = cc;
1424}
1425
1426
1427// Call the function on the stack with the given arguments.
1428void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
1429                                      CallFunctionFlags flags,
1430                                      int position) {
1431  // Push the arguments ("left-to-right") on the stack.
1432  int arg_count = args->length();
1433  for (int i = 0; i < arg_count; i++) {
1434    Load(args->at(i));
1435  }
1436
1437  // Record the position for debugging purposes.
1438  CodeForSourcePosition(position);
1439
1440  // Use the shared code stub to call the function.
1441  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
1442  CallFunctionStub call_function(arg_count, in_loop, flags);
1443  frame_->CallStub(&call_function, arg_count + 1);
1444
1445  // Restore context and pop function from the stack.
1446  __ ldr(cp, frame_->Context());
1447  frame_->Drop();  // discard the TOS
1448}
1449
1450
1451void CodeGenerator::CallApplyLazy(Expression* applicand,
1452                                  Expression* receiver,
1453                                  VariableProxy* arguments,
1454                                  int position) {
1455  // An optimized implementation of expressions of the form
1456  // x.apply(y, arguments).
1457  // If the arguments object of the scope has not been allocated,
1458  // and x.apply is Function.prototype.apply, this optimization
1459  // just copies y and the arguments of the current function on the
1460  // stack, as receiver and arguments, and calls x.
1461  // In the implementation comments, we call x the applicand
1462  // and y the receiver.
1463
1464  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
1465  ASSERT(arguments->IsArguments());
1466
1467  // Load applicand.apply onto the stack. This will usually
1468  // give us a megamorphic load site. Not super, but it works.
1469  Load(applicand);
1470  Handle<String> name = Factory::LookupAsciiSymbol("apply");
1471  frame_->Dup();
1472  frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
1473  frame_->EmitPush(r0);
1474
1475  // Load the receiver and the existing arguments object onto the
1476  // expression stack. Avoid allocating the arguments object here.
1477  Load(receiver);
1478  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
1479
1480  // At this point the top two stack elements are probably in registers
1481  // since they were just loaded.  Ensure they are in regs and get the
1482  // regs.
1483  Register receiver_reg = frame_->Peek2();
1484  Register arguments_reg = frame_->Peek();
1485
1486  // From now on the frame is spilled.
1487  frame_->SpillAll();
1488
1489  // Emit the source position information after having loaded the
1490  // receiver and the arguments.
1491  CodeForSourcePosition(position);
1492  // Contents of the stack at this point:
1493  //   sp[0]: arguments object of the current function or the hole.
1494  //   sp[1]: receiver
1495  //   sp[2]: applicand.apply
1496  //   sp[3]: applicand.
1497
1498  // Check if the arguments object has been lazily allocated
1499  // already. If so, just use that instead of copying the arguments
1500  // from the stack. This also deals with cases where a local variable
1501  // named 'arguments' has been introduced.
1502  JumpTarget slow;
1503  Label done;
1504  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
1505  __ cmp(ip, arguments_reg);
1506  slow.Branch(ne);
1507
1508  Label build_args;
1509  // Get rid of the arguments object probe.
1510  frame_->Drop();
1511  // Stack now has 3 elements on it.
1512  // Contents of stack at this point:
1513  //   sp[0]: receiver - in the receiver_reg register.
1514  //   sp[1]: applicand.apply
1515  //   sp[2]: applicand.
1516
1517  // Check that the receiver really is a JavaScript object.
1518  __ BranchOnSmi(receiver_reg, &build_args);
1519  // We allow all JSObjects including JSFunctions.  As long as
1520  // JS_FUNCTION_TYPE is the last instance type and it is right
1521  // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
1522  // bound.
1523  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
1524  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
1525  __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
1526  __ b(lt, &build_args);
1527
1528  // Check that applicand.apply is Function.prototype.apply.
1529  __ ldr(r0, MemOperand(sp, kPointerSize));
1530  __ BranchOnSmi(r0, &build_args);
1531  __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
1532  __ b(ne, &build_args);
1533  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
1534  Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
1535  __ ldr(r1, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
1536  __ cmp(r1, Operand(apply_code));
1537  __ b(ne, &build_args);
1538
1539  // Check that applicand is a function.
1540  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
1541  __ BranchOnSmi(r1, &build_args);
1542  __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
1543  __ b(ne, &build_args);
1544
  // Copy the arguments to this function, possibly from the
1546  // adaptor frame below it.
1547  Label invoke, adapted;
1548  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1549  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1550  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1551  __ b(eq, &adapted);
1552
1553  // No arguments adaptor frame. Copy fixed number of arguments.
1554  __ mov(r0, Operand(scope()->num_parameters()));
1555  for (int i = 0; i < scope()->num_parameters(); i++) {
1556    __ ldr(r2, frame_->ParameterAt(i));
1557    __ push(r2);
1558  }
1559  __ jmp(&invoke);
1560
1561  // Arguments adaptor frame present. Copy arguments from there, but
  // guard against copying too many and overflowing the stack.
1563  __ bind(&adapted);
1564  static const uint32_t kArgumentsLimit = 1 * KB;
1565  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1566  __ mov(r0, Operand(r0, LSR, kSmiTagSize));
1567  __ mov(r3, r0);
1568  __ cmp(r0, Operand(kArgumentsLimit));
1569  __ b(gt, &build_args);
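  // In other words, at most 1 * KB (1024) arguments are copied inline;
  // larger argument counts fall back to the generic build_args path.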
1570
1571  // Loop through the arguments pushing them onto the execution
1572  // stack. We don't inform the virtual frame of the push, so we don't
1573  // have to worry about getting rid of the elements from the virtual
1574  // frame.
1575  Label loop;
1576  // r3 is a small non-negative integer, due to the test above.
1577  __ cmp(r3, Operand(0));
1578  __ b(eq, &invoke);
1579  // Compute the address of the first argument.
1580  __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
1581  __ add(r2, r2, Operand(kPointerSize));
1582  __ bind(&loop);
1583  // Post-decrement argument address by kPointerSize on each iteration.
1584  __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
1585  __ push(r4);
1586  __ sub(r3, r3, Operand(1), SetCC);
1587  __ b(gt, &loop);
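  // Schematically, the loop above performs (with r2 pointing at the
  // first argument and r3 holding the argument count):
  //
  //   do { push(*r2); r2 -= kPointerSize; } while (--r3 > 0);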
1588
1589  // Invoke the function.
1590  __ bind(&invoke);
1591  ParameterCount actual(r0);
1592  __ InvokeFunction(r1, actual, CALL_FUNCTION);
1593  // Drop applicand.apply and applicand from the stack, and push
1594  // the result of the function call, but leave the spilled frame
1595  // unchanged, with 3 elements, so it is correct when we compile the
1596  // slow-case code.
1597  __ add(sp, sp, Operand(2 * kPointerSize));
1598  __ push(r0);
1599  // Stack now has 1 element:
1600  //   sp[0]: result
1601  __ jmp(&done);
1602
1603  // Slow-case: Allocate the arguments object since we know it isn't
  // there, and fall through to the slow case where we call
1605  // applicand.apply.
1606  __ bind(&build_args);
  // Stack now has 3 elements, because we jumped here from a point where:
1608  //   sp[0]: receiver
1609  //   sp[1]: applicand.apply
1610  //   sp[2]: applicand.
1611  StoreArgumentsObject(false);
1612
1613  // Stack and frame now have 4 elements.
1614  slow.Bind();
1615
1616  // Generic computation of x.apply(y, args) with no special optimization.
1617  // Flip applicand.apply and applicand on the stack, so
1618  // applicand looks like the receiver of the applicand.apply call.
1619  // Then process it as a normal function call.
1620  __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
1621  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
1622  __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
1623
1624  CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
1625  frame_->CallStub(&call_function, 3);
1626  // The function and its two arguments have been dropped.
1627  frame_->Drop();  // Drop the receiver as well.
1628  frame_->EmitPush(r0);
1629  // Stack now has 1 element:
1630  //   sp[0]: result
1631  __ bind(&done);
1632
1633  // Restore the context register after a call.
1634  __ ldr(cp, frame_->Context());
1635}
1636
1637
1638void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
1639  ASSERT(has_cc());
1640  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
1641  target->Branch(cc);
1642  cc_reg_ = al;
1643}
1644
1645
1646void CodeGenerator::CheckStack() {
1647  frame_->SpillAll();
1648  Comment cmnt(masm_, "[ check stack");
1649  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
  // Set up lr before the comparison and the conditional branch, much as
  // one would fill a branch delay slot.  kInstrSize is added to the
  // implicit 8 byte offset that always applies to operations with pc and
  // gives a return address 12 bytes down.
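  // Schematically, the three instructions below are:
  //
  //   add lr, pc, #kInstrSize   ; lr = address of the instruction after mov
  //   cmp sp, ip                ; compare sp with the stack limit
  //   mov pc, #stub_code, lo    ; call StackCheckStub if sp < limit
  //
  // (A sketch only; the mov is emitted with a CODE_TARGET relocation.)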
1653  masm_->add(lr, pc, Operand(Assembler::kInstrSize));
1654  masm_->cmp(sp, Operand(ip));
1655  StackCheckStub stub;
1656  // Call the stub if lower.
1657  masm_->mov(pc,
1658             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
1659                     RelocInfo::CODE_TARGET),
1660             LeaveCC,
1661             lo);
1662}
1663
1664
1665void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
1666#ifdef DEBUG
1667  int original_height = frame_->height();
1668#endif
1669  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
1670    Visit(statements->at(i));
1671  }
1672  ASSERT(!has_valid_frame() || frame_->height() == original_height);
1673}
1674
1675
1676void CodeGenerator::VisitBlock(Block* node) {
1677#ifdef DEBUG
1678  int original_height = frame_->height();
1679#endif
1680  Comment cmnt(masm_, "[ Block");
1681  CodeForStatementPosition(node);
1682  node->break_target()->SetExpectedHeight();
1683  VisitStatements(node->statements());
1684  if (node->break_target()->is_linked()) {
1685    node->break_target()->Bind();
1686  }
1687  node->break_target()->Unuse();
1688  ASSERT(!has_valid_frame() || frame_->height() == original_height);
1689}
1690
1691
1692void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
1693  frame_->EmitPush(cp);
1694  frame_->EmitPush(Operand(pairs));
1695  frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
1696
1697  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
1698  // The result is discarded.
1699}
1700
1701
1702void CodeGenerator::VisitDeclaration(Declaration* node) {
1703#ifdef DEBUG
1704  int original_height = frame_->height();
1705#endif
1706  Comment cmnt(masm_, "[ Declaration");
1707  Variable* var = node->proxy()->var();
1708  ASSERT(var != NULL);  // must have been resolved
1709  Slot* slot = var->slot();
1710
1711  // If it was not possible to allocate the variable at compile time,
1712  // we need to "declare" it at runtime to make sure it actually
1713  // exists in the local context.
1714  if (slot != NULL && slot->type() == Slot::LOOKUP) {
1715    // Variables with a "LOOKUP" slot were introduced as non-locals
1716    // during variable resolution and must have mode DYNAMIC.
1717    ASSERT(var->is_dynamic());
1718    // For now, just do a runtime call.
1719    frame_->EmitPush(cp);
1720    frame_->EmitPush(Operand(var->name()));
    // Declaration nodes are always introduced in one of two modes.
1722    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
1723    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
1724    frame_->EmitPush(Operand(Smi::FromInt(attr)));
1725    // Push initial value, if any.
1726    // Note: For variables we must not push an initial value (such as
1727    // 'undefined') because we may have a (legal) redeclaration and we
1728    // must not destroy the current value.
1729    if (node->mode() == Variable::CONST) {
1730      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
1731    } else if (node->fun() != NULL) {
1732      Load(node->fun());
1733    } else {
1734      frame_->EmitPush(Operand(0));
1735    }
1736
1737    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
1738    // Ignore the return value (declarations are statements).
1739
1740    ASSERT(frame_->height() == original_height);
1741    return;
1742  }
1743
1744  ASSERT(!var->is_global());
1745
1746  // If we have a function or a constant, we need to initialize the variable.
1747  Expression* val = NULL;
1748  if (node->mode() == Variable::CONST) {
1749    val = new Literal(Factory::the_hole_value());
1750  } else {
1751    val = node->fun();  // NULL if we don't have a function
1752  }
1753
1754
1756    WriteBarrierCharacter wb_info =
1757        val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
1758    if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
1759    // Set initial value.
1760    Reference target(this, node->proxy());
1761    Load(val);
1762    target.SetValue(NOT_CONST_INIT, wb_info);
1763
1764    // Get rid of the assigned value (declarations are statements).
1765    frame_->Drop();
1766  }
1767  ASSERT(frame_->height() == original_height);
1768}
1769
1770
1771void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
1772#ifdef DEBUG
1773  int original_height = frame_->height();
1774#endif
1775  Comment cmnt(masm_, "[ ExpressionStatement");
1776  CodeForStatementPosition(node);
1777  Expression* expression = node->expression();
1778  expression->MarkAsStatement();
1779  Load(expression);
1780  frame_->Drop();
1781  ASSERT(frame_->height() == original_height);
1782}
1783
1784
1785void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
1786#ifdef DEBUG
1787  int original_height = frame_->height();
1788#endif
1789  Comment cmnt(masm_, "// EmptyStatement");
1790  CodeForStatementPosition(node);
1791  // nothing to do
1792  ASSERT(frame_->height() == original_height);
1793}
1794
1795
1796void CodeGenerator::VisitIfStatement(IfStatement* node) {
1797#ifdef DEBUG
1798  int original_height = frame_->height();
1799#endif
1800  Comment cmnt(masm_, "[ IfStatement");
1801  // Generate different code depending on which parts of the if statement
1802  // are present or not.
1803  bool has_then_stm = node->HasThenStatement();
1804  bool has_else_stm = node->HasElseStatement();
1805
1806  CodeForStatementPosition(node);
1807
1808  JumpTarget exit;
1809  if (has_then_stm && has_else_stm) {
1810    Comment cmnt(masm_, "[ IfThenElse");
1811    JumpTarget then;
1812    JumpTarget else_;
1813    // if (cond)
1814    LoadCondition(node->condition(), &then, &else_, true);
1815    if (frame_ != NULL) {
1816      Branch(false, &else_);
1817    }
1818    // then
1819    if (frame_ != NULL || then.is_linked()) {
1820      then.Bind();
1821      Visit(node->then_statement());
1822    }
1823    if (frame_ != NULL) {
1824      exit.Jump();
1825    }
1826    // else
1827    if (else_.is_linked()) {
1828      else_.Bind();
1829      Visit(node->else_statement());
1830    }
1831
1832  } else if (has_then_stm) {
1833    Comment cmnt(masm_, "[ IfThen");
1834    ASSERT(!has_else_stm);
1835    JumpTarget then;
1836    // if (cond)
1837    LoadCondition(node->condition(), &then, &exit, true);
1838    if (frame_ != NULL) {
1839      Branch(false, &exit);
1840    }
1841    // then
1842    if (frame_ != NULL || then.is_linked()) {
1843      then.Bind();
1844      Visit(node->then_statement());
1845    }
1846
1847  } else if (has_else_stm) {
1848    Comment cmnt(masm_, "[ IfElse");
1849    ASSERT(!has_then_stm);
1850    JumpTarget else_;
1851    // if (!cond)
1852    LoadCondition(node->condition(), &exit, &else_, true);
1853    if (frame_ != NULL) {
1854      Branch(true, &exit);
1855    }
1856    // else
1857    if (frame_ != NULL || else_.is_linked()) {
1858      else_.Bind();
1859      Visit(node->else_statement());
1860    }
1861
1862  } else {
1863    Comment cmnt(masm_, "[ If");
1864    ASSERT(!has_then_stm && !has_else_stm);
1865    // if (cond)
1866    LoadCondition(node->condition(), &exit, &exit, false);
1867    if (frame_ != NULL) {
1868      if (has_cc()) {
1869        cc_reg_ = al;
1870      } else {
1871        frame_->Drop();
1872      }
1873    }
1874  }
1875
1876  // end
1877  if (exit.is_linked()) {
1878    exit.Bind();
1879  }
1880  ASSERT(!has_valid_frame() || frame_->height() == original_height);
1881}
1882
1883
1884void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
1885  Comment cmnt(masm_, "[ ContinueStatement");
1886  CodeForStatementPosition(node);
1887  node->target()->continue_target()->Jump();
1888}
1889
1890
1891void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
1892  Comment cmnt(masm_, "[ BreakStatement");
1893  CodeForStatementPosition(node);
1894  node->target()->break_target()->Jump();
1895}
1896
1897
1898void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
1899  frame_->SpillAll();
1900  Comment cmnt(masm_, "[ ReturnStatement");
1901
1902  CodeForStatementPosition(node);
1903  Load(node->expression());
1904  if (function_return_is_shadowed_) {
1905    frame_->EmitPop(r0);
1906    function_return_.Jump();
1907  } else {
1908    // Pop the result from the frame and prepare the frame for
    // returning, thus making it easier to merge.
1910    frame_->PopToR0();
1911    frame_->PrepareForReturn();
1912    if (function_return_.is_bound()) {
1913      // If the function return label is already bound we reuse the
1914      // code by jumping to the return site.
1915      function_return_.Jump();
1916    } else {
1917      function_return_.Bind();
1918      GenerateReturnSequence();
1919    }
1920  }
1921}
1922
1923
1924void CodeGenerator::GenerateReturnSequence() {
1925  if (FLAG_trace) {
1926    // Push the return value on the stack as the parameter.
1927    // Runtime::TraceExit returns the parameter as it is.
1928    frame_->EmitPush(r0);
1929    frame_->CallRuntime(Runtime::kTraceExit, 1);
1930  }
1931
1932#ifdef DEBUG
1933  // Add a label for checking the size of the code used for returning.
1934  Label check_exit_codesize;
1935  masm_->bind(&check_exit_codesize);
1936#endif
1937  // Make sure that the constant pool is not emitted inside of the return
1938  // sequence.
1939  { Assembler::BlockConstPoolScope block_const_pool(masm_);
1940    // Tear down the frame which will restore the caller's frame pointer and
1941    // the link register.
1942    frame_->Exit();
1943
    // Here we use masm_-> instead of the __ macro to prevent the code
    // coverage tool from instrumenting the code, as we rely on the exact
    // code size here.
1946    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
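    // E.g., for a function with two declared parameters, sp_delta is
    // 3 * kPointerSize: the receiver plus the two parameters are popped.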
1947    masm_->add(sp, sp, Operand(sp_delta));
1948    masm_->Jump(lr);
1949    DeleteFrame();
1950
1951#ifdef DEBUG
1952    // Check that the size of the code used for returning matches what is
    // expected by the debugger.  If the sp_delta above cannot be encoded
    // in the add instruction, the add will generate two instructions.
1955    int return_sequence_length =
1956        masm_->InstructionsGeneratedSince(&check_exit_codesize);
1957    CHECK(return_sequence_length ==
1958          Assembler::kJSReturnSequenceInstructions ||
1959          return_sequence_length ==
1960          Assembler::kJSReturnSequenceInstructions + 1);
1961#endif
1962  }
1963}
1964
1965
1966void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
1967#ifdef DEBUG
1968  int original_height = frame_->height();
1969#endif
1970  Comment cmnt(masm_, "[ WithEnterStatement");
1971  CodeForStatementPosition(node);
1972  Load(node->expression());
1973  if (node->is_catch_block()) {
1974    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
1975  } else {
1976    frame_->CallRuntime(Runtime::kPushContext, 1);
1977  }
1978#ifdef DEBUG
1979  JumpTarget verified_true;
1980  __ cmp(r0, cp);
1981  verified_true.Branch(eq);
1982  __ stop("PushContext: r0 is expected to be the same as cp");
1983  verified_true.Bind();
1984#endif
1985  // Update context local.
1986  __ str(cp, frame_->Context());
1987  ASSERT(frame_->height() == original_height);
1988}
1989
1990
1991void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
1992#ifdef DEBUG
1993  int original_height = frame_->height();
1994#endif
1995  Comment cmnt(masm_, "[ WithExitStatement");
1996  CodeForStatementPosition(node);
1997  // Pop context.
1998  __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
1999  // Update context local.
2000  __ str(cp, frame_->Context());
2001  ASSERT(frame_->height() == original_height);
2002}
2003
2004
2005void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
2006#ifdef DEBUG
2007  int original_height = frame_->height();
2008#endif
2009  Comment cmnt(masm_, "[ SwitchStatement");
2010  CodeForStatementPosition(node);
2011  node->break_target()->SetExpectedHeight();
2012
2013  Load(node->tag());
2014
2015  JumpTarget next_test;
2016  JumpTarget fall_through;
2017  JumpTarget default_entry;
2018  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
2019  ZoneList<CaseClause*>* cases = node->cases();
2020  int length = cases->length();
2021  CaseClause* default_clause = NULL;
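  // Illustrative example (hypothetical source): for
  //
  //   switch (x) { case 0: a(); default: b(); case 1: c(); }
  //
  // the default clause is compiled last; default_entry enters it when
  // all tests fail, and default_exit routes fall-through from b() back
  // to the body of 'case 1' so source order is preserved.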
2022
2023  for (int i = 0; i < length; i++) {
2024    CaseClause* clause = cases->at(i);
2025    if (clause->is_default()) {
2026      // Remember the default clause and compile it at the end.
2027      default_clause = clause;
2028      continue;
2029    }
2030
2031    Comment cmnt(masm_, "[ Case clause");
2032    // Compile the test.
2033    next_test.Bind();
2034    next_test.Unuse();
2035    // Duplicate TOS.
2036    frame_->Dup();
2037    Comparison(eq, NULL, clause->label(), true);
2038    Branch(false, &next_test);
2039
2040    // Before entering the body from the test, remove the switch value from
2041    // the stack.
2042    frame_->Drop();
2043
2044    // Label the body so that fall through is enabled.
2045    if (i > 0 && cases->at(i - 1)->is_default()) {
2046      default_exit.Bind();
2047    } else {
2048      fall_through.Bind();
2049      fall_through.Unuse();
2050    }
2051    VisitStatements(clause->statements());
2052
2053    // If control flow can fall through from the body, jump to the next body
2054    // or the end of the statement.
2055    if (frame_ != NULL) {
2056      if (i < length - 1 && cases->at(i + 1)->is_default()) {
2057        default_entry.Jump();
2058      } else {
2059        fall_through.Jump();
2060      }
2061    }
2062  }
2063
2064  // The final "test" removes the switch value.
2065  next_test.Bind();
2066  frame_->Drop();
2067
2068  // If there is a default clause, compile it.
2069  if (default_clause != NULL) {
2070    Comment cmnt(masm_, "[ Default clause");
2071    default_entry.Bind();
2072    VisitStatements(default_clause->statements());
2073    // If control flow can fall out of the default and there is a case after
2074    // it, jump to that case's body.
2075    if (frame_ != NULL && default_exit.is_bound()) {
2076      default_exit.Jump();
2077    }
2078  }
2079
2080  if (fall_through.is_linked()) {
2081    fall_through.Bind();
2082  }
2083
2084  if (node->break_target()->is_linked()) {
2085    node->break_target()->Bind();
2086  }
2087  node->break_target()->Unuse();
2088  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2089}
2090
2091
2092void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
2093#ifdef DEBUG
2094  int original_height = frame_->height();
2095#endif
2096  Comment cmnt(masm_, "[ DoWhileStatement");
2097  CodeForStatementPosition(node);
2098  node->break_target()->SetExpectedHeight();
2099  JumpTarget body(JumpTarget::BIDIRECTIONAL);
2100  IncrementLoopNesting();
2101
2102  // Label the top of the loop for the backward CFG edge.  If the test
2103  // is always true we can use the continue target, and if the test is
2104  // always false there is no need.
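  // E.g., 'do { ... } while (true)' uses the continue target bound here
  // as the back edge, while 'do { ... } while (false)' needs no label at
  // the top at all.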
2105  ConditionAnalysis info = AnalyzeCondition(node->cond());
2106  switch (info) {
2107    case ALWAYS_TRUE:
2108      node->continue_target()->SetExpectedHeight();
2109      node->continue_target()->Bind();
2110      break;
2111    case ALWAYS_FALSE:
2112      node->continue_target()->SetExpectedHeight();
2113      break;
2114    case DONT_KNOW:
2115      node->continue_target()->SetExpectedHeight();
2116      body.Bind();
2117      break;
2118  }
2119
2120  CheckStack();  // TODO(1222600): ignore if body contains calls.
2121  Visit(node->body());
2122
2123  // Compile the test.
2124  switch (info) {
2125    case ALWAYS_TRUE:
2126      // If control can fall off the end of the body, jump back to the
2127      // top.
2128      if (has_valid_frame()) {
2129        node->continue_target()->Jump();
2130      }
2131      break;
2132    case ALWAYS_FALSE:
2133      // If we have a continue in the body, we only have to bind its
2134      // jump target.
2135      if (node->continue_target()->is_linked()) {
2136        node->continue_target()->Bind();
2137      }
2138      break;
2139    case DONT_KNOW:
2140      // We have to compile the test expression if it can be reached by
2141      // control flow falling out of the body or via continue.
2142      if (node->continue_target()->is_linked()) {
2143        node->continue_target()->Bind();
2144      }
2145      if (has_valid_frame()) {
2146        Comment cmnt(masm_, "[ DoWhileCondition");
2147        CodeForDoWhileConditionPosition(node);
2148        LoadCondition(node->cond(), &body, node->break_target(), true);
2149        if (has_valid_frame()) {
          // An invalid frame here indicates that control did not
2151          // fall out of the test expression.
2152          Branch(true, &body);
2153        }
2154      }
2155      break;
2156  }
2157
2158  if (node->break_target()->is_linked()) {
2159    node->break_target()->Bind();
2160  }
2161  DecrementLoopNesting();
2162  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2163}
2164
2165
2166void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
2167#ifdef DEBUG
2168  int original_height = frame_->height();
2169#endif
2170  Comment cmnt(masm_, "[ WhileStatement");
2171  CodeForStatementPosition(node);
2172
2173  // If the test is never true and has no side effects there is no need
2174  // to compile the test or body.
2175  ConditionAnalysis info = AnalyzeCondition(node->cond());
2176  if (info == ALWAYS_FALSE) return;
2177
2178  node->break_target()->SetExpectedHeight();
2179  IncrementLoopNesting();
2180
2181  // Label the top of the loop with the continue target for the backward
2182  // CFG edge.
2183  node->continue_target()->SetExpectedHeight();
2184  node->continue_target()->Bind();
2185
2186  if (info == DONT_KNOW) {
2187    JumpTarget body(JumpTarget::BIDIRECTIONAL);
2188    LoadCondition(node->cond(), &body, node->break_target(), true);
2189    if (has_valid_frame()) {
2190      // A NULL frame indicates that control did not fall out of the
2191      // test expression.
2192      Branch(false, node->break_target());
2193    }
2194    if (has_valid_frame() || body.is_linked()) {
2195      body.Bind();
2196    }
2197  }
2198
2199  if (has_valid_frame()) {
2200    CheckStack();  // TODO(1222600): ignore if body contains calls.
2201    Visit(node->body());
2202
2203    // If control flow can fall out of the body, jump back to the top.
2204    if (has_valid_frame()) {
2205      node->continue_target()->Jump();
2206    }
2207  }
2208  if (node->break_target()->is_linked()) {
2209    node->break_target()->Bind();
2210  }
2211  DecrementLoopNesting();
2212  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2213}
2214
2215
2216void CodeGenerator::VisitForStatement(ForStatement* node) {
2217#ifdef DEBUG
2218  int original_height = frame_->height();
2219#endif
2220  Comment cmnt(masm_, "[ ForStatement");
2221  CodeForStatementPosition(node);
2222  if (node->init() != NULL) {
2223    Visit(node->init());
2224  }
2225
2226  // If the test is never true there is no need to compile the test or
2227  // body.
2228  ConditionAnalysis info = AnalyzeCondition(node->cond());
2229  if (info == ALWAYS_FALSE) return;
2230
2231  node->break_target()->SetExpectedHeight();
2232  IncrementLoopNesting();
2233
2234  // We know that the loop index is a smi if it is not modified in the
2235  // loop body and it is checked against a constant limit in the loop
2236  // condition.  In this case, we reset the static type information of the
2237  // loop index to smi before compiling the body, the update expression, and
2238  // the bottom check of the loop condition.
2239  TypeInfoCodeGenState type_info_scope(this,
2240                                       node->is_fast_smi_loop() ?
2241                                           node->loop_variable()->slot() :
2242                                           NULL,
2243                                       TypeInfo::Smi());
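  // Illustrative example (hypothetical source): in
  //
  //   for (var i = 0; i < 100; i++) { sum += i; }
  //
  // 'i' is only modified by the update expression and is compared
  // against a constant limit, so it can be treated as a smi throughout
  // the body, the update, and the bottom check.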
2244
2245  // If there is no update statement, label the top of the loop with the
2246  // continue target, otherwise with the loop target.
2247  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2248  if (node->next() == NULL) {
2249    node->continue_target()->SetExpectedHeight();
2250    node->continue_target()->Bind();
2251  } else {
2252    node->continue_target()->SetExpectedHeight();
2253    loop.Bind();
2254  }
2255
2256  // If the test is always true, there is no need to compile it.
2257  if (info == DONT_KNOW) {
2258    JumpTarget body;
2259    LoadCondition(node->cond(), &body, node->break_target(), true);
2260    if (has_valid_frame()) {
2261      Branch(false, node->break_target());
2262    }
2263    if (has_valid_frame() || body.is_linked()) {
2264      body.Bind();
2265    }
2266  }
2267
2268  if (has_valid_frame()) {
2269    CheckStack();  // TODO(1222600): ignore if body contains calls.
2270    Visit(node->body());
2271
2272    if (node->next() == NULL) {
2273      // If there is no update statement and control flow can fall out
2274      // of the loop, jump directly to the continue label.
2275      if (has_valid_frame()) {
2276        node->continue_target()->Jump();
2277      }
2278    } else {
2279      // If there is an update statement and control flow can reach it
2280      // via falling out of the body of the loop or continuing, we
2281      // compile the update statement.
2282      if (node->continue_target()->is_linked()) {
2283        node->continue_target()->Bind();
2284      }
2285      if (has_valid_frame()) {
        // Record the source position of the statement, since this code,
        // which comes after the code for the body, actually belongs to
        // the loop statement and not the body.
2289        CodeForStatementPosition(node);
2290        Visit(node->next());
2291        loop.Jump();
2292      }
2293    }
2294  }
2295  if (node->break_target()->is_linked()) {
2296    node->break_target()->Bind();
2297  }
2298  DecrementLoopNesting();
2299  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2300}
2301
2302
2303void CodeGenerator::VisitForInStatement(ForInStatement* node) {
2304#ifdef DEBUG
2305  int original_height = frame_->height();
2306#endif
2307  VirtualFrame::SpilledScope spilled_scope(frame_);
2308  Comment cmnt(masm_, "[ ForInStatement");
2309  CodeForStatementPosition(node);
2310
2311  JumpTarget primitive;
2312  JumpTarget jsobject;
2313  JumpTarget fixed_array;
2314  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
2315  JumpTarget end_del_check;
2316  JumpTarget exit;
2317
2318  // Get the object to enumerate over (converted to JSObject).
2319  Load(node->enumerable());
2320
2321  // Both SpiderMonkey and kjs ignore null and undefined in contrast
2322  // to the specification.  12.6.4 mandates a call to ToObject.
2323  frame_->EmitPop(r0);
2324  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2325  __ cmp(r0, ip);
2326  exit.Branch(eq);
2327  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2328  __ cmp(r0, ip);
2329  exit.Branch(eq);
2330
2331  // Stack layout in body:
2332  // [iteration counter (Smi)]
2333  // [length of array]
2334  // [FixedArray]
2335  // [Map or 0]
2336  // [Object]
2337
2338  // Check if enumerable is already a JSObject
2339  __ tst(r0, Operand(kSmiTagMask));
2340  primitive.Branch(eq);
2341  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
2342  jsobject.Branch(hs);
2343
2344  primitive.Bind();
2345  frame_->EmitPush(r0);
2346  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
2347
2348  jsobject.Bind();
2349  // Get the set of properties (as a FixedArray or Map).
2350  // r0: value to be iterated over
2351  frame_->EmitPush(r0);  // Push the object being iterated over.
2352
2353  // Check cache validity in generated code. This is a fast case for
2354  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
2355  // guarantee cache validity, call the runtime system to check cache
2356  // validity or get the property names in a fixed array.
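  // Schematically, the fast-path checks below approximate (a sketch of
  // the JSObject::IsSimpleEnum conditions):
  //
  //   for (obj = receiver; obj != null; obj = obj.map.prototype) {
  //     if (obj.elements != empty_fixed_array) goto call_runtime;
  //     if (obj.map.descriptors have no enum cache) goto call_runtime;
  //     if (obj != receiver && enum cache not empty) goto call_runtime;
  //   }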
2357  JumpTarget call_runtime;
2358  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2359  JumpTarget check_prototype;
2360  JumpTarget use_cache;
2361  __ mov(r1, Operand(r0));
2362  loop.Bind();
2363  // Check that there are no elements.
2364  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
2365  __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
2366  __ cmp(r2, r4);
2367  call_runtime.Branch(ne);
2368  // Check that instance descriptors are not empty so that we can
2369  // check for an enum cache.  Leave the map in r3 for the subsequent
2370  // prototype load.
2371  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
2372  __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
2373  __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
2374  __ cmp(r2, ip);
2375  call_runtime.Branch(eq);
  // Check that there is an enum cache in the non-empty instance
2377  // descriptors.  This is the case if the next enumeration index
2378  // field does not contain a smi.
2379  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
2380  __ tst(r2, Operand(kSmiTagMask));
2381  call_runtime.Branch(eq);
2382  // For all objects but the receiver, check that the cache is empty.
2383  // r4: empty fixed array root.
2384  __ cmp(r1, r0);
2385  check_prototype.Branch(eq);
2386  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
2387  __ cmp(r2, r4);
2388  call_runtime.Branch(ne);
2389  check_prototype.Bind();
2390  // Load the prototype from the map and loop if non-null.
2391  __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
2392  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2393  __ cmp(r1, ip);
2394  loop.Branch(ne);
2395  // The enum cache is valid.  Load the map of the object being
2396  // iterated over and use the cache for the iteration.
2397  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
2398  use_cache.Jump();
2399
2400  call_runtime.Bind();
2401  // Call the runtime to get the property names for the object.
2402  frame_->EmitPush(r0);  // push the object (slot 4) for the runtime call
2403  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
2404
2405  // If we got a map from the runtime call, we can do a fast
2406  // modification check. Otherwise, we got a fixed array, and we have
2407  // to do a slow check.
2408  // r0: map or fixed array (result from call to
2409  // Runtime::kGetPropertyNamesFast)
2410  __ mov(r2, Operand(r0));
2411  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
2412  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
2413  __ cmp(r1, ip);
2414  fixed_array.Branch(ne);
2415
2416  use_cache.Bind();
2417  // Get enum cache
2418  // r0: map (either the result from a call to
2419  // Runtime::kGetPropertyNamesFast or has been fetched directly from
2420  // the object)
2421  __ mov(r1, Operand(r0));
2422  __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
2423  __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
2424  __ ldr(r2,
2425         FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
2426
2427  frame_->EmitPush(r0);  // map
2428  frame_->EmitPush(r2);  // enum cache bridge cache
2429  __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
2430  frame_->EmitPush(r0);
2431  __ mov(r0, Operand(Smi::FromInt(0)));
2432  frame_->EmitPush(r0);
2433  entry.Jump();
2434
2435  fixed_array.Bind();
2436  __ mov(r1, Operand(Smi::FromInt(0)));
2437  frame_->EmitPush(r1);  // insert 0 in place of Map
2438  frame_->EmitPush(r0);
2439
2440  // Push the length of the array and the initial index onto the stack.
2441  __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
2442  frame_->EmitPush(r0);
2443  __ mov(r0, Operand(Smi::FromInt(0)));  // init index
2444  frame_->EmitPush(r0);
2445
2446  // Condition.
2447  entry.Bind();
2448  // sp[0] : index
2449  // sp[1] : array/enum cache length
2450  // sp[2] : array or enum cache
2451  // sp[3] : 0 or map
2452  // sp[4] : enumerable
2453  // Grab the current frame's height for the break and continue
2454  // targets only after all the state is pushed on the frame.
2455  node->break_target()->SetExpectedHeight();
2456  node->continue_target()->SetExpectedHeight();
2457
2458  // Load the current count to r0, load the length to r1.
2459  __ Ldrd(r0, r1, frame_->ElementAt(0));
2460  __ cmp(r0, r1);  // compare to the array length
2461  node->break_target()->Branch(hs);
2462
2463  // Get the i'th entry of the array.
2464  __ ldr(r2, frame_->ElementAt(2));
2465  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2466  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
2467
2468  // Get Map or 0.
2469  __ ldr(r2, frame_->ElementAt(3));
2470  // Check if this (still) matches the map of the enumerable.
2471  // If not, we have to filter the key.
2472  __ ldr(r1, frame_->ElementAt(4));
2473  __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
2474  __ cmp(r1, Operand(r2));
2475  end_del_check.Branch(eq);
2476
2477  // Convert the entry to a string (or null if it isn't a property anymore).
2478  __ ldr(r0, frame_->ElementAt(4));  // push enumerable
2479  frame_->EmitPush(r0);
2480  frame_->EmitPush(r3);  // push entry
2481  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
2482  __ mov(r3, Operand(r0));
2483
2484  // If the property has been removed while iterating, we just skip it.
2485  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2486  __ cmp(r3, ip);
2487  node->continue_target()->Branch(eq);
2488
2489  end_del_check.Bind();
2490  // Store the entry in the 'each' expression and take another spin in the
  // loop.  r3: i'th entry of the enum cache (or string thereof)
2492  frame_->EmitPush(r3);  // push entry
2493  { Reference each(this, node->each());
2494    if (!each.is_illegal()) {
2495      if (each.size() > 0) {
2496        __ ldr(r0, frame_->ElementAt(each.size()));
2497        frame_->EmitPush(r0);
2498        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
2499        frame_->Drop(2);
2500      } else {
2501        // If the reference was to a slot we rely on the convenient property
2502        // that it doesn't matter whether a value (eg, r3 pushed above) is
2503        // right on top of or right underneath a zero-sized reference.
2504        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
2505        frame_->Drop();
2506      }
2507    }
2508  }
2509  // Body.
2510  CheckStack();  // TODO(1222600): ignore if body contains calls.
2511  Visit(node->body());
2512
2513  // Next.  Reestablish a spilled frame in case we are coming here via
2514  // a continue in the body.
2515  node->continue_target()->Bind();
2516  frame_->SpillAll();
2517  frame_->EmitPop(r0);
2518  __ add(r0, r0, Operand(Smi::FromInt(1)));
2519  frame_->EmitPush(r0);
2520  entry.Jump();
2521
2522  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
2523  // any frame.
2524  node->break_target()->Bind();
2525  frame_->Drop(5);
2526
2527  // Exit.
2528  exit.Bind();
2529  node->continue_target()->Unuse();
2530  node->break_target()->Unuse();
2531  ASSERT(frame_->height() == original_height);
2532}
2533
2534
2535void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
2536#ifdef DEBUG
2537  int original_height = frame_->height();
2538#endif
2539  VirtualFrame::SpilledScope spilled_scope(frame_);
2540  Comment cmnt(masm_, "[ TryCatchStatement");
2541  CodeForStatementPosition(node);
2542
2543  JumpTarget try_block;
2544  JumpTarget exit;
2545
2546  try_block.Call();
2547  // --- Catch block ---
2548  frame_->EmitPush(r0);
2549
2550  // Store the caught exception in the catch variable.
2551  Variable* catch_var = node->catch_var()->var();
2552  ASSERT(catch_var != NULL && catch_var->slot() != NULL);
2553  StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
2554
2555  // Remove the exception from the stack.
2556  frame_->Drop();
2557
2558  VisitStatements(node->catch_block()->statements());
2559  if (frame_ != NULL) {
2560    exit.Jump();
2561  }
2562
2564  // --- Try block ---
2565  try_block.Bind();
2566
2567  frame_->PushTryHandler(TRY_CATCH_HANDLER);
2568  int handler_height = frame_->height();
2569
2570  // Shadow the labels for all escapes from the try block, including
2571  // returns. During shadowing, the original label is hidden as the
2572  // LabelShadow and operations on the original actually affect the
2573  // shadowing label.
2574  //
2575  // We should probably try to unify the escaping labels and the return
2576  // label.
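  // E.g., a 'return' inside the try block must not jump straight to the
  // function's return site: the try handler pushed below has to be
  // unlinked first.  The ShadowTarget intercepts the jump so that the
  // unlink code emitted after the try block runs before the return.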
2577  int nof_escapes = node->escaping_targets()->length();
2578  List<ShadowTarget*> shadows(1 + nof_escapes);
2579
2580  // Add the shadow target for the function return.
2581  static const int kReturnShadowIndex = 0;
2582  shadows.Add(new ShadowTarget(&function_return_));
2583  bool function_return_was_shadowed = function_return_is_shadowed_;
2584  function_return_is_shadowed_ = true;
2585  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2586
2587  // Add the remaining shadow targets.
2588  for (int i = 0; i < nof_escapes; i++) {
2589    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2590  }
2591
2592  // Generate code for the statements in the try block.
2593  VisitStatements(node->try_block()->statements());
2594
2595  // Stop the introduced shadowing and count the number of required unlinks.
2596  // After shadowing stops, the original labels are unshadowed and the
2597  // LabelShadows represent the formerly shadowing labels.
2598  bool has_unlinks = false;
2599  for (int i = 0; i < shadows.length(); i++) {
2600    shadows[i]->StopShadowing();
2601    has_unlinks = has_unlinks || shadows[i]->is_linked();
2602  }
2603  function_return_is_shadowed_ = function_return_was_shadowed;
2604
2605  // Get an external reference to the handler address.
2606  ExternalReference handler_address(Top::k_handler_address);
2607
2608  // If we can fall off the end of the try block, unlink from try chain.
2609  if (has_valid_frame()) {
2610    // The next handler address is on top of the frame.  Unlink from
2611    // the handler list and drop the rest of this handler from the
2612    // frame.
2613    ASSERT(StackHandlerConstants::kNextOffset == 0);
2614    frame_->EmitPop(r1);
2615    __ mov(r3, Operand(handler_address));
2616    __ str(r1, MemOperand(r3));
2617    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2618    if (has_unlinks) {
2619      exit.Jump();
2620    }
2621  }
2622
2623  // Generate unlink code for the (formerly) shadowing labels that have been
2624  // jumped to.  Deallocate each shadow target.
2625  for (int i = 0; i < shadows.length(); i++) {
2626    if (shadows[i]->is_linked()) {
      // Unlink from the try chain.
2628      shadows[i]->Bind();
2629      // Because we can be jumping here (to spilled code) from unspilled
2630      // code, we need to reestablish a spilled frame at this block.
2631      frame_->SpillAll();
2632
2633      // Reload sp from the top handler, because some statements that we
2634      // break from (eg, for...in) may have left stuff on the stack.
2635      __ mov(r3, Operand(handler_address));
2636      __ ldr(sp, MemOperand(r3));
2637      frame_->Forget(frame_->height() - handler_height);
2638
2639      ASSERT(StackHandlerConstants::kNextOffset == 0);
2640      frame_->EmitPop(r1);
2641      __ str(r1, MemOperand(r3));
2642      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2643
2644      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2645        frame_->PrepareForReturn();
2646      }
2647      shadows[i]->other_target()->Jump();
2648    }
2649  }
2650
2651  exit.Bind();
2652  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2653}
2654
2655
2656void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
2657#ifdef DEBUG
2658  int original_height = frame_->height();
2659#endif
2660  VirtualFrame::SpilledScope spilled_scope(frame_);
2661  Comment cmnt(masm_, "[ TryFinallyStatement");
2662  CodeForStatementPosition(node);
2663
  // State: Used to keep track of the reason for entering the finally
2665  // block. Should probably be extended to hold information for
2666  // break/continue from within the try block.
2667  enum { FALLING, THROWING, JUMPING };
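  // E.g., 'try { return v; } finally { ... }' reaches the finally block
  // in state JUMPING (plus the index of the shadowed return target) with
  // v kept in r0, while falling off the end of the try block reaches it
  // in state FALLING with a faked 'undefined' as the result.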
2668
2669  JumpTarget try_block;
2670  JumpTarget finally_block;
2671
2672  try_block.Call();
2673
2674  frame_->EmitPush(r0);  // save exception object on the stack
2675  // In case of thrown exceptions, this is where we continue.
2676  __ mov(r2, Operand(Smi::FromInt(THROWING)));
2677  finally_block.Jump();
2678
2679  // --- Try block ---
2680  try_block.Bind();
2681
2682  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2683  int handler_height = frame_->height();
2684
2685  // Shadow the labels for all escapes from the try block, including
2686  // returns.  Shadowing hides the original label as the LabelShadow and
2687  // operations on the original actually affect the shadowing label.
2688  //
2689  // We should probably try to unify the escaping labels and the return
2690  // label.
2691  int nof_escapes = node->escaping_targets()->length();
2692  List<ShadowTarget*> shadows(1 + nof_escapes);
2693
2694  // Add the shadow target for the function return.
2695  static const int kReturnShadowIndex = 0;
2696  shadows.Add(new ShadowTarget(&function_return_));
2697  bool function_return_was_shadowed = function_return_is_shadowed_;
2698  function_return_is_shadowed_ = true;
2699  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2700
2701  // Add the remaining shadow targets.
2702  for (int i = 0; i < nof_escapes; i++) {
2703    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2704  }
2705
2706  // Generate code for the statements in the try block.
2707  VisitStatements(node->try_block()->statements());
2708
2709  // Stop the introduced shadowing and count the number of required unlinks.
2710  // After shadowing stops, the original labels are unshadowed and the
2711  // LabelShadows represent the formerly shadowing labels.
2712  int nof_unlinks = 0;
2713  for (int i = 0; i < shadows.length(); i++) {
2714    shadows[i]->StopShadowing();
2715    if (shadows[i]->is_linked()) nof_unlinks++;
2716  }
2717  function_return_is_shadowed_ = function_return_was_shadowed;
2718
2719  // Get an external reference to the handler address.
2720  ExternalReference handler_address(Top::k_handler_address);
2721
2722  // If we can fall off the end of the try block, unlink from the try
2723  // chain and set the state on the frame to FALLING.
2724  if (has_valid_frame()) {
2725    // The next handler address is on top of the frame.
2726    ASSERT(StackHandlerConstants::kNextOffset == 0);
2727    frame_->EmitPop(r1);
2728    __ mov(r3, Operand(handler_address));
2729    __ str(r1, MemOperand(r3));
2730    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2731
2732    // Fake a top of stack value (unneeded when FALLING) and set the
2733    // state in r2, then jump around the unlink blocks if any.
2734    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2735    frame_->EmitPush(r0);
2736    __ mov(r2, Operand(Smi::FromInt(FALLING)));
2737    if (nof_unlinks > 0) {
2738      finally_block.Jump();
2739    }
2740  }
2741
2742  // Generate code to unlink and set the state for the (formerly)
2743  // shadowing targets that have been jumped to.
2744  for (int i = 0; i < shadows.length(); i++) {
2745    if (shadows[i]->is_linked()) {
2746      // If we have come from the shadowed return, the return value is
2747      // in (a non-refcounted reference to) r0.  We must preserve it
2748      // until it is pushed.
2749      //
2750      // Because we can be jumping here (to spilled code) from
2751      // unspilled code, we need to reestablish a spilled frame at
2752      // this block.
2753      shadows[i]->Bind();
2754      frame_->SpillAll();
2755
2756      // Reload sp from the top handler, because some statements that
2757      // we break from (eg, for...in) may have left stuff on the
2758      // stack.
2759      __ mov(r3, Operand(handler_address));
2760      __ ldr(sp, MemOperand(r3));
2761      frame_->Forget(frame_->height() - handler_height);
2762
2763      // Unlink this handler and drop it from the frame.  The next
2764      // handler address is currently on top of the frame.
2765      ASSERT(StackHandlerConstants::kNextOffset == 0);
2766      frame_->EmitPop(r1);
2767      __ str(r1, MemOperand(r3));
2768      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2769
2770      if (i == kReturnShadowIndex) {
2771        // If this label shadowed the function return, materialize the
2772        // return value on the stack.
2773        frame_->EmitPush(r0);
2774      } else {
2775        // Fake TOS for targets that shadowed breaks and continues.
2776        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2777        frame_->EmitPush(r0);
2778      }
2779      __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
2780      if (--nof_unlinks > 0) {
2781        // If this is not the last unlink block, jump around the next.
2782        finally_block.Jump();
2783      }
2784    }
2785  }
2786
2787  // --- Finally block ---
2788  finally_block.Bind();
2789
2790  // Push the state on the stack.
2791  frame_->EmitPush(r2);
2792
2793  // We keep two elements on the stack - the (possibly faked) result
2794  // and the state - while evaluating the finally block.
2795  //
2796  // Generate code for the statements in the finally block.
2797  VisitStatements(node->finally_block()->statements());
2798
2799  if (has_valid_frame()) {
2800    // Restore state and return value or faked TOS.
2801    frame_->EmitPop(r2);
2802    frame_->EmitPop(r0);
2803  }
2804
2805  // Generate code to jump to the right destination for all used
2806  // formerly shadowing targets.  Deallocate each shadow target.
2807  for (int i = 0; i < shadows.length(); i++) {
2808    if (has_valid_frame() && shadows[i]->is_bound()) {
2809      JumpTarget* original = shadows[i]->other_target();
2810      __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
2811      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2812        JumpTarget skip;
2813        skip.Branch(ne);
2814        frame_->PrepareForReturn();
2815        original->Jump();
2816        skip.Bind();
2817      } else {
2818        original->Branch(eq);
2819      }
2820    }
2821  }
2822
2823  if (has_valid_frame()) {
2824    // Check if we need to rethrow the exception.
2825    JumpTarget exit;
2826    __ cmp(r2, Operand(Smi::FromInt(THROWING)));
2827    exit.Branch(ne);
2828
2829    // Rethrow exception.
2830    frame_->EmitPush(r0);
2831    frame_->CallRuntime(Runtime::kReThrow, 1);
2832
2833    // Done.
2834    exit.Bind();
2835  }
2836  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2837}
2838
2839
2840void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2841#ifdef DEBUG
2842  int original_height = frame_->height();
2843#endif
  Comment cmnt(masm_, "[ DebuggerStatement");
2845  CodeForStatementPosition(node);
2846#ifdef ENABLE_DEBUGGER_SUPPORT
2847  frame_->DebugBreak();
2848#endif
2849  // Ignore the return value.
2850  ASSERT(frame_->height() == original_height);
2851}
2852
2853
2854void CodeGenerator::InstantiateFunction(
2855    Handle<SharedFunctionInfo> function_info) {
2856  // Use the fast case closure allocation code that allocates in new
2857  // space for nested functions that don't need literals cloning.
2858  if (scope()->is_function_scope() && function_info->num_literals() == 0) {
2859    FastNewClosureStub stub;
2860    frame_->EmitPush(Operand(function_info));
2861    frame_->SpillAll();
2862    frame_->CallStub(&stub, 1);
2863    frame_->EmitPush(r0);
2864  } else {
2865    // Create a new closure.
2866    frame_->EmitPush(cp);
2867    frame_->EmitPush(Operand(function_info));
2868    frame_->CallRuntime(Runtime::kNewClosure, 2);
2869    frame_->EmitPush(r0);
2870  }
2871}
2872
2873
2874void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2875#ifdef DEBUG
2876  int original_height = frame_->height();
2877#endif
2878  Comment cmnt(masm_, "[ FunctionLiteral");
2879
2880  // Build the function info and instantiate it.
2881  Handle<SharedFunctionInfo> function_info =
2882      Compiler::BuildFunctionInfo(node, script(), this);
2883  // Check for stack-overflow exception.
2884  if (HasStackOverflow()) {
2885    ASSERT(frame_->height() == original_height);
2886    return;
2887  }
2888  InstantiateFunction(function_info);
2889  ASSERT_EQ(original_height + 1, frame_->height());
2890}
2891
2892
2893void CodeGenerator::VisitSharedFunctionInfoLiteral(
2894    SharedFunctionInfoLiteral* node) {
2895#ifdef DEBUG
2896  int original_height = frame_->height();
2897#endif
2898  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
2899  InstantiateFunction(node->shared_function_info());
2900  ASSERT_EQ(original_height + 1, frame_->height());
2901}
2902
2903
2904void CodeGenerator::VisitConditional(Conditional* node) {
2905#ifdef DEBUG
2906  int original_height = frame_->height();
2907#endif
2908  Comment cmnt(masm_, "[ Conditional");
2909  JumpTarget then;
2910  JumpTarget else_;
2911  LoadCondition(node->condition(), &then, &else_, true);
2912  if (has_valid_frame()) {
2913    Branch(false, &else_);
2914  }
2915  if (has_valid_frame() || then.is_linked()) {
2916    then.Bind();
2917    Load(node->then_expression());
2918  }
2919  if (else_.is_linked()) {
2920    JumpTarget exit;
2921    if (has_valid_frame()) exit.Jump();
2922    else_.Bind();
2923    Load(node->else_expression());
2924    if (exit.is_linked()) exit.Bind();
2925  }
2926  ASSERT_EQ(original_height + 1, frame_->height());
2927}
2928
2929
2930void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
2931  if (slot->type() == Slot::LOOKUP) {
2932    ASSERT(slot->var()->is_dynamic());
2933
2934    // JumpTargets do not yet support merging frames so the frame must be
2935    // spilled when jumping to these targets.
2936    JumpTarget slow;
2937    JumpTarget done;
2938
2939    // Generate fast case for loading from slots that correspond to
2940    // local/global variables or arguments unless they are shadowed by
2941    // eval-introduced bindings.
2942    EmitDynamicLoadFromSlotFastCase(slot,
2943                                    typeof_state,
2944                                    &slow,
2945                                    &done);
2946
2947    slow.Bind();
2948    frame_->EmitPush(cp);
2949    frame_->EmitPush(Operand(slot->var()->name()));
2950
2951    if (typeof_state == INSIDE_TYPEOF) {
2952      frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
2953    } else {
2954      frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2955    }
2956
2957    done.Bind();
2958    frame_->EmitPush(r0);
2959
2960  } else {
2961    Register scratch = VirtualFrame::scratch0();
2962    TypeInfo info = type_info(slot);
2963    frame_->EmitPush(SlotOperand(slot, scratch), info);
2964
2965    if (slot->var()->mode() == Variable::CONST) {
2966      // Const slots may contain 'the hole' value (the constant hasn't been
2967      // initialized yet) which needs to be converted into the 'undefined'
2968      // value.
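      // Illustrative example (hypothetical source): in
      //
      //   print(c);       // runs before the initializer below
      //   const c = 1;    // until here the slot holds 'the hole'
      //
      // the early read must yield 'undefined', not the hole marker.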
2969      Comment cmnt(masm_, "[ Unhole const");
2970      Register tos = frame_->PopToRegister();
2971      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2972      __ cmp(tos, ip);
2973      __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
2974      frame_->EmitPush(tos);
2975    }
2976  }
2977}
2978
2979
2980void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
2981                                                  TypeofState state) {
2982  VirtualFrame::RegisterAllocationScope scope(this);
2983  LoadFromSlot(slot, state);
2984
2985  // Bail out quickly if we're not using lazy arguments allocation.
2986  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
2987
2988  // ... or if the slot isn't a non-parameter arguments slot.
2989  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
2990
  // Peek at the loaded value, putting it into a register but leaving it
  // on the stack.
2993  Register tos = frame_->Peek();
2994
2995  // If the loaded value is the sentinel that indicates that we
2996  // haven't loaded the arguments object yet, we need to do it now.
2997  JumpTarget exit;
2998  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2999  __ cmp(tos, ip);
3000  exit.Branch(ne);
3001  frame_->Drop();
3002  StoreArgumentsObject(false);
3003  exit.Bind();
3004}
3005
3006
3007void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
3008  ASSERT(slot != NULL);
3009  VirtualFrame::RegisterAllocationScope scope(this);
3010  if (slot->type() == Slot::LOOKUP) {
3011    ASSERT(slot->var()->is_dynamic());
3012
3013    // For now, just do a runtime call.
3014    frame_->EmitPush(cp);
3015    frame_->EmitPush(Operand(slot->var()->name()));
3016
3017    if (init_state == CONST_INIT) {
3018      // Same as the case for a normal store, but ignores attribute
3019      // (e.g. READ_ONLY) of context slot so that we can initialize
3020      // const properties (introduced via eval("const foo = (some
3021      // expr);")). Also, uses the current function context instead of
3022      // the top context.
3023      //
3024      // Note that we must declare the foo upon entry of eval(), via a
3025      // context slot declaration, but we cannot initialize it at the
3026      // same time, because the const declaration may be at the end of
3027      // the eval code (sigh...) and the const variable may have been
3028      // used before (where its value is 'undefined'). Thus, we can only
3029      // do the initialization when we actually encounter the expression
3030      // and when the expression operands are defined and valid, and
3031      // thus we need the split into 2 operations: declaration of the
3032      // context slot followed by initialization.
3033      frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
3034    } else {
3035      frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
3036    }
3037    // Storing a variable must keep the (new) value on the expression
3038    // stack. This is necessary for compiling assignment expressions.
3039    frame_->EmitPush(r0);
3040
3041  } else {
3042    ASSERT(!slot->var()->is_dynamic());
3043    Register scratch = VirtualFrame::scratch0();
3044    Register scratch2 = VirtualFrame::scratch1();
3045
3046    // The frame must be spilled when branching to this target.
3047    JumpTarget exit;
3048
3049    if (init_state == CONST_INIT) {
3050      ASSERT(slot->var()->mode() == Variable::CONST);
3051      // Only the first const initialization must be executed (the slot
3052      // still contains 'the hole' value). When the assignment is
3053      // executed, the code is identical to a normal store (see below).
3054      Comment cmnt(masm_, "[ Init const");
3055      __ ldr(scratch, SlotOperand(slot, scratch));
3056      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3057      __ cmp(scratch, ip);
3058      exit.Branch(ne);
3059    }
3060
3061    // We must execute the store.  Storing a variable must keep the
3062    // (new) value on the stack. This is necessary for compiling
3063    // assignment expressions.
3064    //
3065    // Note: We will reach here even with slot->var()->mode() ==
3066    // Variable::CONST because of const declarations which will
3067    // initialize consts to 'the hole' value and by doing so, end up
    // calling this code.  scratch may be loaded with the context; it is
    // used below in RecordWrite.
3070    Register tos = frame_->Peek();
3071    __ str(tos, SlotOperand(slot, scratch));
3072    if (slot->type() == Slot::CONTEXT) {
3073      // Skip write barrier if the written value is a smi.
3074      __ tst(tos, Operand(kSmiTagMask));
3075      // We don't use tos any more after here.
3076      exit.Branch(eq);
3077      // scratch is loaded with context when calling SlotOperand above.
3078      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
3079      // We need an extra register.  Until the virtual frame can allocate
3080      // one for us, we cheat and ask for a free TOS register.
3081      Register scratch3 = frame_->GetTOSRegister();
3082      __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
3083    }
3084    // Only bind the exit label if a branch to it may have been emitted
3085    // (i.e. the assignment could have been jumped over).  Binding an
3086    // unused label can defeat peephole optimization.
3087    if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
3088      exit.Bind();
3089    }
3090  }
3091}
3092
3093
3094void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
3095                                                      TypeofState typeof_state,
3096                                                      JumpTarget* slow) {
3097  // Check that no extension objects have been created by calls to
3098  // eval from the current scope to the global scope.
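  // For example (illustrative only): in
  //   function f() { eval(s); return y; }
  // the eval may have introduced a 'y' on a context extension object, in
  // which case loading 'y' directly from the global object would be wrong.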
3099  Register tmp = frame_->scratch0();
3100  Register tmp2 = frame_->scratch1();
3101  Register context = cp;
3102  Scope* s = scope();
3103  while (s != NULL) {
3104    if (s->num_heap_slots() > 0) {
3105      if (s->calls_eval()) {
3106        frame_->SpillAll();
3107        // Check that extension is NULL.
3108        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
3109        __ tst(tmp2, tmp2);
3110        slow->Branch(ne);
3111      }
3112      // Load next context in chain.
3113      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
3114      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
3115      context = tmp;
3116    }
3117    // If no outer scope calls eval, we do not need to check more
3118    // context extensions.
3119    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
3120    s = s->outer_scope();
3121  }
3122
3123  if (s->is_eval_scope()) {
3124    frame_->SpillAll();
3125    Label next, fast;
3126    __ Move(tmp, context);
3127    __ bind(&next);
3128    // Terminate at global context.
3129    __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
3130    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
3131    __ cmp(tmp2, ip);
3132    __ b(eq, &fast);
3133    // Check that extension is NULL.
3134    __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
3135    __ tst(tmp2, tmp2);
3136    slow->Branch(ne);
3137    // Load next context in chain.
3138    __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
3139    __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
3140    __ b(&next);
3141    __ bind(&fast);
3142  }
3143
3144  // Load the global object.
3145  LoadGlobal();
3146  // Set up the name register and call the load IC.
3147  frame_->CallLoadIC(slot->var()->name(),
3148                     typeof_state == INSIDE_TYPEOF
3149                         ? RelocInfo::CODE_TARGET
3150                         : RelocInfo::CODE_TARGET_CONTEXT);
3151}
3152
3153
3154void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
3155                                                    TypeofState typeof_state,
3156                                                    JumpTarget* slow,
3157                                                    JumpTarget* done) {
3158  // Generate fast-case code for variables that might be shadowed by
3159  // eval-introduced variables.  Eval is used a lot without
3160  // introducing variables.  In those cases, we do not want to
3161  // perform a runtime call for all variables in the scope
3162  // containing the eval.
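  // For example (illustrative only): in
  //   function f(a) { eval(s); return a; }
  // 'a' is DYNAMIC_LOCAL: unless the eval introduces a shadowing 'a', the
  // parameter can be loaded directly, guarded by context extension checks.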
3163  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
3164    LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
3165    frame_->SpillAll();
3166    done->Jump();
3167
3168  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
3169    frame_->SpillAll();
3170    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
3171    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
3172    if (potential_slot != NULL) {
3173      // Generate fast case for locals that rewrite to slots.
3174      __ ldr(r0,
3175             ContextSlotOperandCheckExtensions(potential_slot,
3176                                               r1,
3177                                               r2,
3178                                               slow));
3179      if (potential_slot->var()->mode() == Variable::CONST) {
3180        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3181        __ cmp(r0, ip);
3182        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
3183      }
3184      done->Jump();
3185    } else if (rewrite != NULL) {
3186      // Generate fast case for argument loads.
3187      Property* property = rewrite->AsProperty();
3188      if (property != NULL) {
3189        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
3190        Literal* key_literal = property->key()->AsLiteral();
3191        if (obj_proxy != NULL &&
3192            key_literal != NULL &&
3193            obj_proxy->IsArguments() &&
3194            key_literal->handle()->IsSmi()) {
3195          // Load arguments object if there are no eval-introduced
3196          // variables. Then load the argument from the arguments
3197          // object using keyed load.
3198          __ ldr(r0,
3199                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
3200                                                   r1,
3201                                                   r2,
3202                                                   slow));
3203          frame_->EmitPush(r0);
3204          __ mov(r1, Operand(key_literal->handle()));
3205          frame_->EmitPush(r1);
3206          EmitKeyedLoad();
3207          done->Jump();
3208        }
3209      }
3210    }
3211  }
3212}
3213
3214
3215void CodeGenerator::VisitSlot(Slot* node) {
3216#ifdef DEBUG
3217  int original_height = frame_->height();
3218#endif
3219  Comment cmnt(masm_, "[ Slot");
3220  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
3221  ASSERT_EQ(original_height + 1, frame_->height());
3222}
3223
3224
3225void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
3226#ifdef DEBUG
3227  int original_height = frame_->height();
3228#endif
3229  Comment cmnt(masm_, "[ VariableProxy");
3230
3231  Variable* var = node->var();
3232  Expression* expr = var->rewrite();
3233  if (expr != NULL) {
3234    Visit(expr);
3235  } else {
3236    ASSERT(var->is_global());
3237    Reference ref(this, node);
3238    ref.GetValue();
3239  }
3240  ASSERT_EQ(original_height + 1, frame_->height());
3241}
3242
3243
3244void CodeGenerator::VisitLiteral(Literal* node) {
3245#ifdef DEBUG
3246  int original_height = frame_->height();
3247#endif
3248  Comment cmnt(masm_, "[ Literal");
3249  Register reg = frame_->GetTOSRegister();
3250  bool is_smi = node->handle()->IsSmi();
3251  __ mov(reg, Operand(node->handle()));
3252  frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
3253  ASSERT_EQ(original_height + 1, frame_->height());
3254}
3255
3256
3257void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
3258#ifdef DEBUG
3259  int original_height = frame_->height();
3260#endif
3261  Comment cmnt(masm_, "[ RexExp Literal");
3262
3263  Register tmp = VirtualFrame::scratch0();
3264  // Free up a TOS register that can be used to push the literal.
3265  Register literal = frame_->GetTOSRegister();
3266
3267  // Retrieve the literal array and check the allocated entry.
3268
3269  // Load the function of this activation.
3270  __ ldr(tmp, frame_->Function());
3271
3272  // Load the literals array of the function.
3273  __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
3274
3275  // Load the literal at the index saved in the AST.
3276  int literal_offset =
3277      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
3278  __ ldr(literal, FieldMemOperand(tmp, literal_offset));
3279
3280  JumpTarget done;
3281  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3282  __ cmp(literal, ip);
3283  // This branch locks the virtual frame at the done label to match the
3284  // one we have here, where the literal register is not on the stack and
3285  // nothing is spilled.
3286  done.Branch(ne);
3287
3288  // If the entry is undefined we call the runtime system to compute
3289  // the literal.
3290  // literal array  (0)
3291  frame_->EmitPush(tmp);
3292  // literal index  (1)
3293  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
3294  // RegExp pattern (2)
3295  frame_->EmitPush(Operand(node->pattern()));
3296  // RegExp flags   (3)
3297  frame_->EmitPush(Operand(node->flags()));
3298  frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
3299  __ Move(literal, r0);
3300
3301  // This call to bind will get us back to the virtual frame we had before
3302  // where things are not spilled and the literal register is not on the stack.
3303  done.Bind();
3304  // Push the literal.
3305  frame_->EmitPush(literal);
3306  ASSERT_EQ(original_height + 1, frame_->height());
3307}
3308
3309
3310void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
3311#ifdef DEBUG
3312  int original_height = frame_->height();
3313#endif
3314  Comment cmnt(masm_, "[ ObjectLiteral");
3315
3316  Register literal = frame_->GetTOSRegister();
3317  // Load the function of this activation.
3318  __ ldr(literal, frame_->Function());
3319  // Literal array.
3320  __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
3321  frame_->EmitPush(literal);
3322  // Literal index.
3323  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
3324  // Constant properties.
3325  frame_->EmitPush(Operand(node->constant_properties()));
3326  // Should the object literal have fast elements?
3327  frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
3328  if (node->depth() > 1) {
3329    frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
3330  } else {
3331    frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
3332  }
3333  frame_->EmitPush(r0);  // save the result
3334  for (int i = 0; i < node->properties()->length(); i++) {
3335    // At the start of each iteration, the top of stack contains
3336    // the newly created object literal.
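    // For example (illustrative only): in { a: 7, b: x, get c() {} }
    // 'a: 7' is a CONSTANT property (already in the boilerplate), 'b: x'
    // is COMPUTED, and 'c' is a GETTER.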
3337    ObjectLiteral::Property* property = node->properties()->at(i);
3338    Literal* key = property->key();
3339    Expression* value = property->value();
3340    switch (property->kind()) {
3341      case ObjectLiteral::Property::CONSTANT:
3342        break;
3343      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
3344        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
3345        // else fall through
3346      case ObjectLiteral::Property::COMPUTED:
3347        if (key->handle()->IsSymbol()) {
3348          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
3349          Load(value);
3350          frame_->PopToR0();
3351          // Fetch the object literal.
3352          frame_->SpillAllButCopyTOSToR1();
3353          __ mov(r2, Operand(key->handle()));
3354          frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
3355          break;
3356        }
3357        // else fall through
3358      case ObjectLiteral::Property::PROTOTYPE: {
3359        frame_->Dup();
3360        Load(key);
3361        Load(value);
3362        frame_->CallRuntime(Runtime::kSetProperty, 3);
3363        break;
3364      }
3365      case ObjectLiteral::Property::SETTER: {
3366        frame_->Dup();
3367        Load(key);
3368        frame_->EmitPush(Operand(Smi::FromInt(1)));
3369        Load(value);
3370        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
3371        break;
3372      }
3373      case ObjectLiteral::Property::GETTER: {
3374        frame_->Dup();
3375        Load(key);
3376        frame_->EmitPush(Operand(Smi::FromInt(0)));
3377        Load(value);
3378        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
3379        break;
3380      }
3381    }
3382  }
3383  ASSERT_EQ(original_height + 1, frame_->height());
3384}
3385
3386
3387void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
3388#ifdef DEBUG
3389  int original_height = frame_->height();
3390#endif
3391  Comment cmnt(masm_, "[ ArrayLiteral");
3392
3393  Register tos = frame_->GetTOSRegister();
3394  // Load the function of this activation.
3395  __ ldr(tos, frame_->Function());
3396  // Load the literals array of the function.
3397  __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
3398  frame_->EmitPush(tos);
3399  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
3400  frame_->EmitPush(Operand(node->constant_elements()));
3401  int length = node->values()->length();
3402  if (node->depth() > 1) {
3403    frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
3404  } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
3405    frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
3406  } else {
3407    FastCloneShallowArrayStub stub(length);
3408    frame_->CallStub(&stub, 3);
3409  }
3410  frame_->EmitPush(r0);  // save the result
3411  // r0: created object literal
3412
3413  // Generate code to set the elements in the array that are not
3414  // literals.
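  // For example (illustrative only): for [7, x, y + 1] only x and y + 1
  // are stored by this loop; the constant 7 is already in the boilerplate.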
3415  for (int i = 0; i < node->values()->length(); i++) {
3416    Expression* value = node->values()->at(i);
3417
3418    // If value is a literal the property value is already set in the
3419    // boilerplate object.
3420    if (value->AsLiteral() != NULL) continue;
3421    // If value is a materialized literal the property value is already set
3422    // in the boilerplate object if it is simple.
3423    if (CompileTimeValue::IsCompileTimeValue(value)) continue;
3424
3425    // The property must be set by generated code.
3426    Load(value);
3427    frame_->PopToR0();
3428    // Fetch the object literal.
3429    frame_->SpillAllButCopyTOSToR1();
3430
3431    // Get the elements array.
3432    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
3433
3434    // Write to the indexed properties array.
3435    int offset = i * kPointerSize + FixedArray::kHeaderSize;
3436    __ str(r0, FieldMemOperand(r1, offset));
3437
3438    // Update the write barrier for the array address.
3439    __ RecordWrite(r1, Operand(offset), r3, r2);
3440  }
3441  ASSERT_EQ(original_height + 1, frame_->height());
3442}
3443
3444
3445void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
3446#ifdef DEBUG
3447  int original_height = frame_->height();
3448#endif
3449  // Call runtime routine to allocate the catch extension object and
3450  // assign the exception value to the catch variable.
3451  Comment cmnt(masm_, "[ CatchExtensionObject");
3452  Load(node->key());
3453  Load(node->value());
3454  frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
3455  frame_->EmitPush(r0);
3456  ASSERT_EQ(original_height + 1, frame_->height());
3457}
3458
3459
3460void CodeGenerator::EmitSlotAssignment(Assignment* node) {
3461#ifdef DEBUG
3462  int original_height = frame_->height();
3463#endif
3464  Comment cmnt(masm(), "[ Variable Assignment");
3465  Variable* var = node->target()->AsVariableProxy()->AsVariable();
3466  ASSERT(var != NULL);
3467  Slot* slot = var->slot();
3468  ASSERT(slot != NULL);
3469
3470  // Evaluate the right-hand side.
3471  if (node->is_compound()) {
3472    // For a compound assignment the right-hand side is a binary operation
3473    // between the current property value and the actual right-hand side.
3474    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
3475
3476    // Perform the binary operation.
3477    Literal* literal = node->value()->AsLiteral();
3478    bool overwrite_value =
3479        (node->value()->AsBinaryOperation() != NULL &&
3480         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3481    if (literal != NULL && literal->handle()->IsSmi()) {
3482      SmiOperation(node->binary_op(),
3483                   literal->handle(),
3484                   false,
3485                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3486    } else {
3487      GenerateInlineSmi inline_smi =
3488          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3489      if (literal != NULL) {
3490        ASSERT(!literal->handle()->IsSmi());
3491        inline_smi = DONT_GENERATE_INLINE_SMI;
3492      }
3493      Load(node->value());
3494      GenericBinaryOperation(node->binary_op(),
3495                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3496                             inline_smi);
3497    }
3498  } else {
3499    Load(node->value());
3500  }
3501
3502  // Perform the assignment.
3503  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
3504    CodeForSourcePosition(node->position());
3505    StoreToSlot(slot,
3506                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
3507  }
3508  ASSERT_EQ(original_height + 1, frame_->height());
3509}
3510
3511
3512void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
3513#ifdef DEBUG
3514  int original_height = frame_->height();
3515#endif
3516  Comment cmnt(masm(), "[ Named Property Assignment");
3517  Variable* var = node->target()->AsVariableProxy()->AsVariable();
3518  Property* prop = node->target()->AsProperty();
3519  ASSERT(var == NULL || (prop == NULL && var->is_global()));
3520
3521  // Initialize name and evaluate the receiver sub-expression if necessary. If
3522  // the receiver is trivial it is not placed on the stack at this point, but
3523  // loaded whenever actually needed.
3524  Handle<String> name;
3525  bool is_trivial_receiver = false;
3526  if (var != NULL) {
3527    name = var->name();
3528  } else {
3529    Literal* lit = prop->key()->AsLiteral();
3530    ASSERT_NOT_NULL(lit);
3531    name = Handle<String>::cast(lit->handle());
3532    // Do not materialize the receiver on the frame if it is trivial.
3533    is_trivial_receiver = prop->obj()->IsTrivial();
3534    if (!is_trivial_receiver) Load(prop->obj());
3535  }
3536
3537  // Change to slow case in the beginning of an initialization block to
3538  // avoid the quadratic behavior of repeatedly adding fast properties.
3539  if (node->starts_initialization_block()) {
3540    // An initialization block consists of assignments of the form
3541    // expr.x = ..., so this is never an assignment to a variable; there
3542    // must therefore be a receiver object.
3543    ASSERT_EQ(NULL, var);
3544    if (is_trivial_receiver) {
3545      Load(prop->obj());
3546    } else {
3547      frame_->Dup();
3548    }
3549    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3550  }
3551
3552  // Change to fast case at the end of an initialization block. To prepare for
3553  // that, add an extra copy of the receiver to the frame, so that it can be
3554  // converted back to fast case after the assignment.
3555  if (node->ends_initialization_block() && !is_trivial_receiver) {
3556    frame_->Dup();
3557  }
3558
3559  // Stack layout:
3560  // [tos]   : receiver (only materialized if non-trivial)
3561  // [tos+1] : receiver if at the end of an initialization block
3562
3563  // Evaluate the right-hand side.
3564  if (node->is_compound()) {
3565    // For a compound assignment the right-hand side is a binary operation
3566    // between the current property value and the actual right-hand side.
3567    if (is_trivial_receiver) {
3568      Load(prop->obj());
3569    } else if (var != NULL) {
3570      LoadGlobal();
3571    } else {
3572      frame_->Dup();
3573    }
3574    EmitNamedLoad(name, var != NULL);
3575
3576    // Perform the binary operation.
3577    Literal* literal = node->value()->AsLiteral();
3578    bool overwrite_value =
3579        (node->value()->AsBinaryOperation() != NULL &&
3580         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3581    if (literal != NULL && literal->handle()->IsSmi()) {
3582      SmiOperation(node->binary_op(),
3583                   literal->handle(),
3584                   false,
3585                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3586    } else {
3587      GenerateInlineSmi inline_smi =
3588          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3589      if (literal != NULL) {
3590        ASSERT(!literal->handle()->IsSmi());
3591        inline_smi = DONT_GENERATE_INLINE_SMI;
3592      }
3593      Load(node->value());
3594      GenericBinaryOperation(node->binary_op(),
3595                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3596                             inline_smi);
3597    }
3598  } else {
3599    // For non-compound assignment just load the right-hand side.
3600    Load(node->value());
3601  }
3602
3603  // Stack layout:
3604  // [tos]   : value
3605  // [tos+1] : receiver (only materialized if non-trivial)
3606  // [tos+2] : receiver if at the end of an initialization block
3607
3608  // Perform the assignment.  It is safe to ignore constants here.
3609  ASSERT(var == NULL || var->mode() != Variable::CONST);
3610  ASSERT_NE(Token::INIT_CONST, node->op());
3611  if (is_trivial_receiver) {
3612    // Load the receiver and swap with the value.
3613    Load(prop->obj());
3614    Register t0 = frame_->PopToRegister();
3615    Register t1 = frame_->PopToRegister(t0);
3616    frame_->EmitPush(t0);
3617    frame_->EmitPush(t1);
3618  }
3619  CodeForSourcePosition(node->position());
3620  bool is_contextual = (var != NULL);
3621  EmitNamedStore(name, is_contextual);
3622  frame_->EmitPush(r0);
3623
3624  // Change to fast case at the end of an initialization block.
3625  if (node->ends_initialization_block()) {
3626    ASSERT_EQ(NULL, var);
3627    // The argument to the runtime call is the receiver.
3628    if (is_trivial_receiver) {
3629      Load(prop->obj());
3630    } else {
3631      // A copy of the receiver is below the value of the assignment. Swap
3632      // the receiver and the value of the assignment expression.
3633      Register t0 = frame_->PopToRegister();
3634      Register t1 = frame_->PopToRegister(t0);
3635      frame_->EmitPush(t0);
3636      frame_->EmitPush(t1);
3637    }
3638    frame_->CallRuntime(Runtime::kToFastProperties, 1);
3639  }
3640
3641  // Stack layout:
3642  // [tos]   : result
3643
3644  ASSERT_EQ(original_height + 1, frame_->height());
3645}
3646
3647
3648void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
3649#ifdef DEBUG
3650  int original_height = frame_->height();
3651#endif
3652  Comment cmnt(masm_, "[ Keyed Property Assignment");
3653  Property* prop = node->target()->AsProperty();
3654  ASSERT_NOT_NULL(prop);
3655
3656  // Evaluate the receiver subexpression.
3657  Load(prop->obj());
3658
3659  WriteBarrierCharacter wb_info;
3660
3661  // Change to slow case in the beginning of an initialization block to
3662  // avoid the quadratic behavior of repeatedly adding fast properties.
3663  if (node->starts_initialization_block()) {
3664    frame_->Dup();
3665    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3666  }
3667
3668  // Change to fast case at the end of an initialization block. To prepare for
3669  // that, add an extra copy of the receiver to the frame, so that it can be
3670  // converted back to fast case after the assignment.
3671  if (node->ends_initialization_block()) {
3672    frame_->Dup();
3673  }
3674
3675  // Evaluate the key subexpression.
3676  Load(prop->key());
3677
3678  // Stack layout:
3679  // [tos]   : key
3680  // [tos+1] : receiver
3681  // [tos+2] : receiver if at the end of an initialization block
3682  //
3683  // Evaluate the right-hand side.
3684  if (node->is_compound()) {
3685    // For a compound assignment the right-hand side is a binary operation
3686    // between the current property value and the actual right-hand side.
3687    // Duplicate receiver and key for loading the current property value.
3688    frame_->Dup2();
3689    EmitKeyedLoad();
3690    frame_->EmitPush(r0);
3691
3692    // Perform the binary operation.
3693    Literal* literal = node->value()->AsLiteral();
3694    bool overwrite_value =
3695        (node->value()->AsBinaryOperation() != NULL &&
3696         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3697    if (literal != NULL && literal->handle()->IsSmi()) {
3698      SmiOperation(node->binary_op(),
3699                   literal->handle(),
3700                   false,
3701                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3702    } else {
3703      GenerateInlineSmi inline_smi =
3704          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3705      if (literal != NULL) {
3706        ASSERT(!literal->handle()->IsSmi());
3707        inline_smi = DONT_GENERATE_INLINE_SMI;
3708      }
3709      Load(node->value());
3710      GenericBinaryOperation(node->binary_op(),
3711                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3712                             inline_smi);
3713    }
3714    wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
3715  } else {
3716    // For non-compound assignment just load the right-hand side.
3717    Load(node->value());
3718    wb_info = node->value()->AsLiteral() != NULL ?
3719        NEVER_NEWSPACE :
3720        (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
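    // (A literal right-hand side is a smi or a compile-time constant that
    // presumably does not live in new space, hence NEVER_NEWSPACE; the
    // smi-likelihood hint lets the keyed store pick a cheaper barrier.)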
3721  }
3722
3723  // Stack layout:
3724  // [tos]   : value
3725  // [tos+1] : key
3726  // [tos+2] : receiver
3727  // [tos+3] : receiver if at the end of an initialization block
3728
3729  // Perform the assignment.  It is safe to ignore constants here.
3730  ASSERT(node->op() != Token::INIT_CONST);
3731  CodeForSourcePosition(node->position());
3732  EmitKeyedStore(prop->key()->type(), wb_info);
3733  frame_->EmitPush(r0);
3734
3735  // Stack layout:
3736  // [tos]   : result
3737  // [tos+1] : receiver if at the end of an initialization block
3738
3739  // Change to fast case at the end of an initialization block.
3740  if (node->ends_initialization_block()) {
3741    // The argument to the runtime call is the extra copy of the receiver,
3742    // which is below the value of the assignment.  Swap the receiver and
3743    // the value of the assignment expression.
3744    Register t0 = frame_->PopToRegister();
3745    Register t1 = frame_->PopToRegister(t0);
3746    frame_->EmitPush(t1);
3747    frame_->EmitPush(t0);
3748    frame_->CallRuntime(Runtime::kToFastProperties, 1);
3749  }
3750
3751  // Stack layout:
3752  // [tos]   : result
3753
3754  ASSERT_EQ(original_height + 1, frame_->height());
3755}
3756
3757
3758void CodeGenerator::VisitAssignment(Assignment* node) {
3759  VirtualFrame::RegisterAllocationScope scope(this);
3760#ifdef DEBUG
3761  int original_height = frame_->height();
3762#endif
3763  Comment cmnt(masm_, "[ Assignment");
3764
3765  Variable* var = node->target()->AsVariableProxy()->AsVariable();
3766  Property* prop = node->target()->AsProperty();
3767
3768  if (var != NULL && !var->is_global()) {
3769    EmitSlotAssignment(node);
3770
3771  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
3772             (var != NULL && var->is_global())) {
3773    // Properties whose keys are property names and global variables are
3774    // treated as named property references.  We do not need to consider
3775    // global 'this' because it is not a valid left-hand side.
3776    EmitNamedPropertyAssignment(node);
3777
3778  } else if (prop != NULL) {
3779    // Other properties (including rewritten parameters for a function that
3780    // uses arguments) are keyed property assignments.
3781    EmitKeyedPropertyAssignment(node);
3782
3783  } else {
3784    // Invalid left-hand side.
3785    Load(node->target());
3786    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
3787    // The runtime call doesn't actually return, but the code generator
3788    // still emits the following code and expects a consistent frame height.
3789    frame_->EmitPush(r0);
3790  }
3791  ASSERT_EQ(original_height + 1, frame_->height());
3792}
3793
3794
3795void CodeGenerator::VisitThrow(Throw* node) {
3796#ifdef DEBUG
3797  int original_height = frame_->height();
3798#endif
3799  Comment cmnt(masm_, "[ Throw");
3800
3801  Load(node->exception());
3802  CodeForSourcePosition(node->position());
3803  frame_->CallRuntime(Runtime::kThrow, 1);
3804  frame_->EmitPush(r0);
3805  ASSERT_EQ(original_height + 1, frame_->height());
3806}
3807
3808
3809void CodeGenerator::VisitProperty(Property* node) {
3810#ifdef DEBUG
3811  int original_height = frame_->height();
3812#endif
3813  Comment cmnt(masm_, "[ Property");
3814
3815  { Reference property(this, node);
3816    property.GetValue();
3817  }
3818  ASSERT_EQ(original_height + 1, frame_->height());
3819}
3820
3821
3822void CodeGenerator::VisitCall(Call* node) {
3823#ifdef DEBUG
3824  int original_height = frame_->height();
3825#endif
3826  Comment cmnt(masm_, "[ Call");
3827
3828  Expression* function = node->expression();
3829  ZoneList<Expression*>* args = node->arguments();
3830
3831  // Standard function call.
3832  // Check if the function is a variable or a property.
3833  Variable* var = function->AsVariableProxy()->AsVariable();
3834  Property* property = function->AsProperty();
3835
3836  // ------------------------------------------------------------------------
3837  // Fast-case: Use inline caching.
3838  // ---
3839  // According to ECMA-262, section 11.2.3, page 44, the function to call
3840  // must be resolved after the arguments have been evaluated. The IC code
3841  // handles this automatically by loading the arguments before resolving
3842  // the function on cache misses (this also holds for megamorphic calls).
3843  // ------------------------------------------------------------------------
3844
3845  if (var != NULL && var->is_possibly_eval()) {
3846    // ----------------------------------
3847    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
3848    // ----------------------------------
3849
3850    // In a call to eval, we first call %ResolvePossiblyDirectEval to
3851    // resolve the function we need to call and the receiver of the
3852    // call.  Then we call the resolved function using the given
3853    // arguments.
3854
3855    // Prepare stack for call to resolved function.
3856    Load(function);
3857
3858    // Allocate a frame slot for the receiver.
3859    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
3860
3861    // Load the arguments.
3862    int arg_count = args->length();
3863    for (int i = 0; i < arg_count; i++) {
3864      Load(args->at(i));
3865    }
3866
3867    VirtualFrame::SpilledScope spilled_scope(frame_);
3868
3869    // If we know that eval can only be shadowed by eval-introduced
3870    // variables we attempt to load the global eval function directly
3871    // in generated code. If we succeed, there is no need to perform a
3872    // context lookup in the runtime system.
3873    JumpTarget done;
3874    if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
3875      ASSERT(var->slot()->type() == Slot::LOOKUP);
3876      JumpTarget slow;
3877      // Prepare the stack for the call to
3878      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
3879      // function, the first argument to the eval call and the
3880      // receiver.
3881      LoadFromGlobalSlotCheckExtensions(var->slot(),
3882                                        NOT_INSIDE_TYPEOF,
3883                                        &slow);
3884      frame_->EmitPush(r0);
3885      if (arg_count > 0) {
3886        __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
3887        frame_->EmitPush(r1);
3888      } else {
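        // No arguments: push r2 as a dummy value for the missing first
        // argument (presumably ignored by the runtime in this case).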
3889        frame_->EmitPush(r2);
3890      }
3891      __ ldr(r1, frame_->Receiver());
3892      frame_->EmitPush(r1);
3893
3894      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
3895
3896      done.Jump();
3897      slow.Bind();
3898    }
3899
3900    // Prepare the stack for the call to ResolvePossiblyDirectEval by
3901    // pushing the loaded function, the first argument to the eval
3902    // call and the receiver.
3903    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
3904    frame_->EmitPush(r1);
3905    if (arg_count > 0) {
3906      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
3907      frame_->EmitPush(r1);
3908    } else {
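      // No arguments: push r2 as a dummy value for the missing first
      // argument (presumably ignored by the runtime in this case).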
3909      frame_->EmitPush(r2);
3910    }
3911    __ ldr(r1, frame_->Receiver());
3912    frame_->EmitPush(r1);
3913
3914    // Resolve the call.
3915    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
3916
3917    // If we generated fast-case code bind the jump-target where fast
3918    // and slow case merge.
3919    if (done.is_linked()) done.Bind();
3920
3921    // Touch up stack with the right values for the function and the receiver.
3922    __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
3923    __ str(r1, MemOperand(sp, arg_count * kPointerSize));
3924
3925    // Call the function.
3926    CodeForSourcePosition(node->position());
3927
3928    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3929    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
3930    frame_->CallStub(&call_function, arg_count + 1);
3931
3932    __ ldr(cp, frame_->Context());
3933    // Remove the function from the stack.
3934    frame_->Drop();
3935    frame_->EmitPush(r0);
3936
3937  } else if (var != NULL && !var->is_this() && var->is_global()) {
3938    // ----------------------------------
3939    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
3940    // ----------------------------------
3941    // Pass the global object as the receiver and let the IC stub
3942    // patch the stack to use the global proxy as 'this' in the
3943    // invoked function.
3944    LoadGlobal();
3945
3946    // Load the arguments.
3947    int arg_count = args->length();
3948    for (int i = 0; i < arg_count; i++) {
3949      Load(args->at(i));
3950    }
3951
3952    VirtualFrame::SpilledScope spilled_scope(frame_);
3953    // Set up the name register and call the IC initialization code.
3954    __ mov(r2, Operand(var->name()));
3955    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3956    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3957    CodeForSourcePosition(node->position());
3958    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
3959                           arg_count + 1);
3960    __ ldr(cp, frame_->Context());
3961    frame_->EmitPush(r0);
3962
3963  } else if (var != NULL && var->slot() != NULL &&
3964             var->slot()->type() == Slot::LOOKUP) {
3965    VirtualFrame::SpilledScope spilled_scope(frame_);
3966    // ----------------------------------
3967    // JavaScript examples:
3968    //
3969    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
3970    //
3971    //  function f() {};
3972    //  function g() {
3973    //    eval(...);
3974    //    f();  // f could be in extension object.
3975    //  }
3976    // ----------------------------------
3977
3978    // JumpTargets do not yet support merging frames so the frame must be
3979    // spilled when jumping to these targets.
3980    JumpTarget slow, done;
3981
3982    // Generate fast case for loading functions from slots that
3983    // correspond to local/global variables or arguments unless they
3984    // are shadowed by eval-introduced bindings.
3985    EmitDynamicLoadFromSlotFastCase(var->slot(),
3986                                    NOT_INSIDE_TYPEOF,
3987                                    &slow,
3988                                    &done);
3989
3990    slow.Bind();
3991    // Load the function
3992    frame_->EmitPush(cp);
3993    __ mov(r0, Operand(var->name()));
3994    frame_->EmitPush(r0);
3995    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
3996    // r0: slot value; r1: receiver
3997
3998    // Load the receiver.
3999    frame_->EmitPush(r0);  // function
4000    frame_->EmitPush(r1);  // receiver
4001
4002    // If fast case code has been generated, emit code to push the
4003    // function and receiver and have the slow path jump around this
4004    // code.
4005    if (done.is_linked()) {
4006      JumpTarget call;
4007      call.Jump();
4008      done.Bind();
4009      frame_->EmitPush(r0);  // function
4010      LoadGlobalReceiver(r1);  // receiver
4011      call.Bind();
4012    }
4013
4014    // Call the function. At this point, everything is spilled but the
4015    // function and receiver are in r0 and r1.
4016    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
4017    frame_->EmitPush(r0);
4018
4019  } else if (property != NULL) {
4020    // Check if the key is a literal string.
4021    Literal* literal = property->key()->AsLiteral();
4022
4023    if (literal != NULL && literal->handle()->IsSymbol()) {
4024      // ------------------------------------------------------------------
4025      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
4026      // ------------------------------------------------------------------
4027
4028      Handle<String> name = Handle<String>::cast(literal->handle());
4029
4030      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
4031          name->IsEqualTo(CStrVector("apply")) &&
4032          args->length() == 2 &&
4033          args->at(1)->AsVariableProxy() != NULL &&
4034          args->at(1)->AsVariableProxy()->IsArguments()) {
4035        // Use the optimized Function.prototype.apply that avoids
4036        // allocating lazily allocated arguments objects.
4037        CallApplyLazy(property->obj(),
4038                      args->at(0),
4039                      args->at(1)->AsVariableProxy(),
4040                      node->position());
4041
4042      } else {
4043        Load(property->obj());  // Receiver.
4044        // Load the arguments.
4045        int arg_count = args->length();
4046        for (int i = 0; i < arg_count; i++) {
4047          Load(args->at(i));
4048        }
4049
4050        VirtualFrame::SpilledScope spilled_scope(frame_);
4051        // Set the name register and call the IC initialization code.
4052        __ mov(r2, Operand(name));
4053        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4054        Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
4055        CodeForSourcePosition(node->position());
4056        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
4057        __ ldr(cp, frame_->Context());
4058        frame_->EmitPush(r0);
4059      }
4060
4061    } else {
4062      // -------------------------------------------
4063      // JavaScript example: 'array[index](1, 2, 3)'
4064      // -------------------------------------------
4065      VirtualFrame::SpilledScope spilled_scope(frame_);
4066
4067      Load(property->obj());
4068      if (property->is_synthetic()) {
4069        Load(property->key());
4070        EmitKeyedLoad();
4071        // Put the function below the receiver.
4072        // Use the global receiver.
4073        frame_->EmitPush(r0);  // Function.
4074        LoadGlobalReceiver(r0);
4075        // Call the function.
4076        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
4077        frame_->EmitPush(r0);
4078      } else {
4079        // Load the arguments.
4080        int arg_count = args->length();
4081        for (int i = 0; i < arg_count; i++) {
4082          Load(args->at(i));
4083        }
4084
4085        // Set the name register and call the IC initialization code.
4086        Load(property->key());
4087        frame_->EmitPop(r2);  // Function name.
4088
4089        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4090        Handle<Code> stub = ComputeKeyedCallInitialize(arg_count, in_loop);
4091        CodeForSourcePosition(node->position());
4092        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
4093        __ ldr(cp, frame_->Context());
4094        frame_->EmitPush(r0);
4095      }
4096    }
4097
4098  } else {
4099    // ----------------------------------
4100    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
4101    // ----------------------------------
4102
4103    // Load the function.
4104    Load(function);
4105
4106    VirtualFrame::SpilledScope spilled_scope(frame_);
4107
4108    // Pass the global proxy as the receiver.
4109    LoadGlobalReceiver(r0);
4110
4111    // Call the function.
4112    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
4113    frame_->EmitPush(r0);
4114  }
4115  ASSERT_EQ(original_height + 1, frame_->height());
4116}
4117
4118
4119void CodeGenerator::VisitCallNew(CallNew* node) {
4120#ifdef DEBUG
4121  int original_height = frame_->height();
4122#endif
4123  Comment cmnt(masm_, "[ CallNew");
4124
4125  // According to ECMA-262, section 11.2.2, page 44, the function
4126  // expression in new calls must be evaluated before the
4127  // arguments. This is different from ordinary calls, where the
4128  // actual function to call is resolved after the arguments have been
4129  // evaluated.
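  // For example (illustrative only): in 'new (f())(g())' the call f() is
  // evaluated before the argument expression g().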
4130
4131  // Compute function to call and use the global object as the
4132  // receiver. There is no need to use the global proxy here because
4133  // it will always be replaced with a newly allocated object.
4134  Load(node->expression());
4135  LoadGlobal();
4136
4137  // Push the arguments ("left-to-right") on the stack.
4138  ZoneList<Expression*>* args = node->arguments();
4139  int arg_count = args->length();
4140  for (int i = 0; i < arg_count; i++) {
4141    Load(args->at(i));
4142  }
4143
4144  VirtualFrame::SpilledScope spilled_scope(frame_);
4145
4146  // r0: the number of arguments.
4147  __ mov(r0, Operand(arg_count));
4148  // Load the function into r1 as per calling convention.
4149  __ ldr(r1, frame_->ElementAt(arg_count + 1));
4150
4151  // Call the construct call builtin that handles allocation and
4152  // constructor invocation.
4153  CodeForSourcePosition(node->position());
4154  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
4155  frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
4156
4157  // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
4158  __ str(r0, frame_->Top());
4159  ASSERT_EQ(original_height + 1, frame_->height());
4160}
4161
4162
4163void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
4164  VirtualFrame::SpilledScope spilled_scope(frame_);
4165  ASSERT(args->length() == 1);
4166  JumpTarget leave, null, function, non_function_constructor;
4167
4168  // Load the object into r0.
4169  Load(args->at(0));
4170  frame_->EmitPop(r0);
4171
4172  // If the object is a smi, we return null.
4173  __ tst(r0, Operand(kSmiTagMask));
4174  null.Branch(eq);
4175
4176  // Check that the object is a JS object but take special care of JS
4177  // functions to make sure they have 'Function' as their class.
4178  __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
4179  null.Branch(lt);
4180
4181  // As long as JS_FUNCTION_TYPE is the last instance type and it is
4182  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4183  // LAST_JS_OBJECT_TYPE.
4184  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4185  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
4186  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
4187  function.Branch(eq);
4188
4189  // Check if the constructor in the map is a function.
4190  __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
4191  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
4192  non_function_constructor.Branch(ne);
4193
4194  // The r0 register now contains the constructor function. Grab the
4195  // instance class name from there.
4196  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
4197  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
4198  frame_->EmitPush(r0);
4199  leave.Jump();
4200
4201  // Functions have class 'Function'.
4202  function.Bind();
4203  __ mov(r0, Operand(Factory::function_class_symbol()));
4204  frame_->EmitPush(r0);
4205  leave.Jump();
4206
4207  // Objects with a non-function constructor have class 'Object'.
4208  non_function_constructor.Bind();
4209  __ mov(r0, Operand(Factory::Object_symbol()));
4210  frame_->EmitPush(r0);
4211  leave.Jump();
4212
4213  // Non-JS objects have class null.
4214  null.Bind();
4215  __ LoadRoot(r0, Heap::kNullValueRootIndex);
4216  frame_->EmitPush(r0);
4217
4218  // All done.
4219  leave.Bind();
4220}
4221
4222
4223void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
4224  VirtualFrame::SpilledScope spilled_scope(frame_);
4225  ASSERT(args->length() == 1);
4226  JumpTarget leave;
4227  Load(args->at(0));
4228  frame_->EmitPop(r0);  // r0 contains object.
4229  // if (object->IsSmi()) return the object.
4230  __ tst(r0, Operand(kSmiTagMask));
4231  leave.Branch(eq);
4232  // It is a heap object - get its map.
4232  // if (!object->IsJSValue()) return the object.
4233  __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
4234  leave.Branch(ne);
4235  // Load the value.
4236  __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
4237  leave.Bind();
4238  frame_->EmitPush(r0);
4239}
4240
4241
4242void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
4243  VirtualFrame::SpilledScope spilled_scope(frame_);
4244  ASSERT(args->length() == 2);
4245  JumpTarget leave;
4246  Load(args->at(0));    // Load the object.
4247  Load(args->at(1));    // Load the value.
4248  frame_->EmitPop(r0);  // r0 contains value
4249  frame_->EmitPop(r1);  // r1 contains object
4250  // if (object->IsSmi()) return object.
4251  __ tst(r1, Operand(kSmiTagMask));
4252  leave.Branch(eq);
4253  // It is a heap object - get its map.
4253  // if (!object->IsJSValue()) return the object.
4254  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
4255  leave.Branch(ne);
4256  // Store the value.
4257  __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
4258  // Update the write barrier.
4259  __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
4260  // Leave.
4261  leave.Bind();
4262  frame_->EmitPush(r0);
4263}
4264
4265
4266void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
4267  ASSERT(args->length() == 1);
4268  Load(args->at(0));
4269  Register reg = frame_->PopToRegister();
4270  __ tst(reg, Operand(kSmiTagMask));
4271  cc_reg_ = eq;
4272}
4273
4274
4275void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
4276  // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
4277  ASSERT_EQ(args->length(), 3);
4278#ifdef ENABLE_LOGGING_AND_PROFILING
4279  if (ShouldGenerateLog(args->at(0))) {
4280    Load(args->at(1));
4281    Load(args->at(2));
4282    frame_->CallRuntime(Runtime::kLog, 2);
4283  }
4284#endif
4285  frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
4286}
4287
4288
4289void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
4290  ASSERT(args->length() == 1);
4291  Load(args->at(0));
4292  Register reg = frame_->PopToRegister();
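  // Test the smi tag bit and the sign bit in one instruction: the value
  // is a non-negative smi only if both are clear.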
4293  __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
4294  cc_reg_ = eq;
4295}
4296
4297
4298// Generates the Math.pow method.
4299void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
4300  ASSERT(args->length() == 2);
4301  Load(args->at(0));
4302  Load(args->at(1));
4303
4304  if (!CpuFeatures::IsSupported(VFP3)) {
4305    frame_->CallRuntime(Runtime::kMath_pow, 2);
4306    frame_->EmitPush(r0);
4307  } else {
4308    CpuFeatures::Scope scope(VFP3);
4309    JumpTarget runtime, done;
4310    Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
4311
4312    Register scratch1 = VirtualFrame::scratch0();
4313    Register scratch2 = VirtualFrame::scratch1();
4314
4315    // Get base and exponent to registers.
4316    Register exponent = frame_->PopToRegister();
4317    Register base = frame_->PopToRegister(exponent);
4318    Register heap_number_map = no_reg;
4319
4320    // Set the frame for the runtime jump target. The code below jumps to the
4321    // jump target label so the frame needs to be established before that.
4322    ASSERT(runtime.entry_frame() == NULL);
4323    runtime.set_entry_frame(frame_);
4324
4325    __ BranchOnNotSmi(exponent, &exponent_nonsmi);
4326    __ BranchOnNotSmi(base, &base_nonsmi);
4327
4328    heap_number_map = r6;
4329    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4330
4331    // Both the exponent and the base are smis. Convert the smi base to a
4332    // double in vfp register d1.
4333    __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
4334    __ b(&powi);
4335
4336    __ bind(&base_nonsmi);
4337    // The exponent is a smi but the base is not. Get the base's double
4338    // value into vfp register d1.
4339    __ ObjectToDoubleVFPRegister(base, d1,
4340                                 scratch1, scratch2, heap_number_map, s0,
4341                                 runtime.entry_label());
4342
4343    __ bind(&powi);
4344
4345    // Load 1.0 into d0.
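    // (0x3ff00000 is the upper word of the IEEE-754 double 1.0; the lower
    // word is zero.)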
4346    __ mov(scratch2, Operand(0x3ff00000));
4347    __ mov(scratch1, Operand(0));
4348    __ vmov(d0, scratch1, scratch2);
4349
4350    // Get the absolute untagged value of the exponent and use that for the
4351    // calculation.
4352    __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
4353    __ rsb(scratch1, scratch1, Operand(0), LeaveCC, mi);  // Negate if negative.
4354    __ vmov(d2, d0, mi);  // 1.0 needed in d2 later if exponent is negative.
4355
4356    // Run through the bits of the exponent (square-and-multiply): the
4357    // result is accumulated in d0, while d1 holds base^(2^i) for bit i.
4358    Label more_bits;
4359    __ bind(&more_bits);
4360    __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
4361    __ vmul(d0, d0, d1, cs);  // Multiply with base^(bit^2) if bit is set.
4362    __ vmul(d1, d1, d1, ne);  // Don't bother calculating next d1 if done.
4363    __ b(ne, &more_bits);
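    // A scalar sketch of the square-and-multiply loop above (illustrative
    // only; 'result', 'power' and 'abs_exponent' are hypothetical names,
    // not registers):
    //   double result = 1.0;                   // accumulated in d0
    //   double power = base;                   // kept in d1
    //   for (uint32_t n = abs_exponent; n != 0; n >>= 1) {
    //     if (n & 1) result *= power;          // bit set: fold into result
    //     power *= power;                      // power = base^(2^i)
    //   }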
4364
4365    // If exponent is positive we are done.
4366    __ cmp(exponent, Operand(0));
4367    __ b(ge, &allocate_return);
4368
4369    // If the exponent is negative the result is 1/result (d2 already holds
4370    // 1.0 in that case). However, if d0 has reached infinity this would not
4371    // give the correct result, so we call the runtime in that case.
4372    __ mov(scratch2, Operand(0x7FF00000));
4373    __ mov(scratch1, Operand(0));
4374    __ vmov(d1, scratch1, scratch2);  // Load infinity into d1.
4375    __ vcmp(d0, d1);
4376    __ vmrs(pc);
4377    runtime.Branch(eq);  // d0 reached infinity.
4378    __ vdiv(d0, d2, d0);
4379    __ b(&allocate_return);
4380
4381    __ bind(&exponent_nonsmi);
4382    // Special handling of raising to the power of -0.5 and 0.5. First check
4383    // that the value is a heap number and that its lower mantissa bits are
4384    // zero (as they are for both 0.5 and -0.5).
4385    heap_number_map = r6;
4386    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4387    __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
4388    __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
4389    __ cmp(scratch1, heap_number_map);
4390    runtime.Branch(ne);
4391    __ tst(scratch2, scratch2);
4392    runtime.Branch(ne);
4393
4394    // Load the higher bits (which contains the floating point exponent).
4395    __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
4396
4397    // Compare exponent with -0.5.
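    // (0xbfe00000 is the upper word of the IEEE-754 double -0.5.)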
4398    __ cmp(scratch1, Operand(0xbfe00000));
4399    __ b(ne, &not_minus_half);
4400
4401    // Get the double value from the base into vfp register d0.
4402    __ ObjectToDoubleVFPRegister(base, d0,
4403                                 scratch1, scratch2, heap_number_map, s0,
4404                                 runtime.entry_label(),
4405                                 AVOID_NANS_AND_INFINITIES);
4406
4407    // Load 1.0 into d2.
4408    __ mov(scratch2, Operand(0x3ff00000));
4409    __ mov(scratch1, Operand(0));
4410    __ vmov(d2, scratch1, scratch2);
4411
4412    // Calculate the reciprocal of the square root. 1/sqrt(x) = sqrt(1/x).
4413    __ vdiv(d0, d2, d0);
4414    __ vsqrt(d0, d0);
4415
4416    __ b(&allocate_return);
4417
4418    __ bind(&not_minus_half);
4419    // Compare exponent with 0.5.
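    // (0x3fe00000 is the upper word of the IEEE-754 double 0.5.)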
4420    __ cmp(scratch1, Operand(0x3fe00000));
4421    runtime.Branch(ne);
4422
4423    // Get the double value from the base into vfp register d0.
4424    __ ObjectToDoubleVFPRegister(base, d0,
4425                                 scratch1, scratch2, heap_number_map, s0,
4426                                 runtime.entry_label(),
4427                                 AVOID_NANS_AND_INFINITIES);
4428    __ vsqrt(d0, d0);
4429
4430    __ bind(&allocate_return);
4431    Register scratch3 = r5;
4432    __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
4433                                   heap_number_map, runtime.entry_label());
4434    __ mov(base, scratch3);
4435    done.Jump();
4436
4437    runtime.Bind();
4438
4439    // Push back the arguments again for the runtime call.
4440    frame_->EmitPush(base);
4441    frame_->EmitPush(exponent);
4442    frame_->CallRuntime(Runtime::kMath_pow, 2);
4443    __ Move(base, r0);
4444
4445    done.Bind();
4446    frame_->EmitPush(base);
4447  }
4448}
4449
4450
4451// Generates the Math.sqrt method.
4452void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
4453  ASSERT(args->length() == 1);
4454  Load(args->at(0));
4455
4456  if (!CpuFeatures::IsSupported(VFP3)) {
4457    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
4458    frame_->EmitPush(r0);
4459  } else {
4460    CpuFeatures::Scope scope(VFP3);
4461    JumpTarget runtime, done;
4462
4463    Register scratch1 = VirtualFrame::scratch0();
4464    Register scratch2 = VirtualFrame::scratch1();
4465
4466    // Get the value from the frame.
4467    Register tos = frame_->PopToRegister();
4468
4469    // Set the frame for the runtime jump target. The code below jumps to the
4470    // jump target label so the frame needs to be established before that.
4471    ASSERT(runtime.entry_frame() == NULL);
4472    runtime.set_entry_frame(frame_);
4473
4474    Register heap_number_map = r6;
4475    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4476
4477    // Get the double value from the heap number into vfp register d0.
4478    __ ObjectToDoubleVFPRegister(tos, d0,
4479                                 scratch1, scratch2, heap_number_map, s0,
4480                                 runtime.entry_label());
4481
4482    // Calculate the square root of d0 and place the result in a heap number.
4483    __ vsqrt(d0, d0);
4484    __ AllocateHeapNumberWithValue(
4485        tos, d0, scratch1, scratch2, heap_number_map, runtime.entry_label());
4486    done.Jump();
4487
4488    runtime.Bind();
4489    // Push back the argument again for the runtime call.
4490    frame_->EmitPush(tos);
4491    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
4492    __ Move(tos, r0);
4493
4494    done.Bind();
4495    frame_->EmitPush(tos);
4496  }
4497}
4498
4499
4500class DeferredStringCharCodeAt : public DeferredCode {
4501 public:
4502  DeferredStringCharCodeAt(Register object,
4503                           Register index,
4504                           Register scratch,
4505                           Register result)
4506      : result_(result),
4507        char_code_at_generator_(object,
4508                                index,
4509                                scratch,
4510                                result,
4511                                &need_conversion_,
4512                                &need_conversion_,
4513                                &index_out_of_range_,
4514                                STRING_INDEX_IS_NUMBER) {}
4515
4516  StringCharCodeAtGenerator* fast_case_generator() {
4517    return &char_code_at_generator_;
4518  }
4519
4520  virtual void Generate() {
4521    VirtualFrameRuntimeCallHelper call_helper(frame_state());
4522    char_code_at_generator_.GenerateSlow(masm(), call_helper);
4523
4524    __ bind(&need_conversion_);
4525    // Move the undefined value into the result register, which will
4526    // trigger conversion.
4527    __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
4528    __ jmp(exit_label());
4529
4530    __ bind(&index_out_of_range_);
4531    // When the index is out of range, the spec requires us to return
4532    // NaN.
4533    __ LoadRoot(result_, Heap::kNanValueRootIndex);
4534    __ jmp(exit_label());
4535  }
4536
4537 private:
4538  Register result_;
4539
4540  Label need_conversion_;
4541  Label index_out_of_range_;
4542
4543  StringCharCodeAtGenerator char_code_at_generator_;
4544};
4545
4546
// This generates code that performs a String.prototype.charCodeAt() call
// or returns undefined in order to trigger conversion.
void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Comment(masm_, "[ GenerateStringCharCodeAt");
  ASSERT(args->length() == 2);

  Load(args->at(0));
  Load(args->at(1));

  Register index = r1;
  Register object = r2;

  frame_->EmitPop(r1);
  frame_->EmitPop(r2);

  // We need two extra registers.
  Register scratch = r3;
  Register result = r0;

  DeferredStringCharCodeAt* deferred =
      new DeferredStringCharCodeAt(object,
                                   index,
                                   scratch,
                                   result);
  deferred->fast_case_generator()->GenerateFast(masm_);
  deferred->BindExit();
  frame_->EmitPush(result);
}


class DeferredStringCharFromCode : public DeferredCode {
 public:
  DeferredStringCharFromCode(Register code,
                             Register result)
      : char_from_code_generator_(code, result) {}

  StringCharFromCodeGenerator* fast_case_generator() {
    return &char_from_code_generator_;
  }

  virtual void Generate() {
    VirtualFrameRuntimeCallHelper call_helper(frame_state());
    char_from_code_generator_.GenerateSlow(masm(), call_helper);
  }

 private:
  StringCharFromCodeGenerator char_from_code_generator_;
};


// Generates code for creating a one-char string from a char code.
void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Comment(masm_, "[ GenerateStringCharFromCode");
  ASSERT(args->length() == 1);

  Load(args->at(0));

  Register code = r1;
  Register result = r0;

  frame_->EmitPop(code);

  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
      code, result);
  deferred->fast_case_generator()->GenerateFast(masm_);
  deferred->BindExit();
  frame_->EmitPush(result);
}


class DeferredStringCharAt : public DeferredCode {
 public:
  DeferredStringCharAt(Register object,
                       Register index,
                       Register scratch1,
                       Register scratch2,
                       Register result)
      : result_(result),
        char_at_generator_(object,
                           index,
                           scratch1,
                           scratch2,
                           result,
                           &need_conversion_,
                           &need_conversion_,
                           &index_out_of_range_,
                           STRING_INDEX_IS_NUMBER) {}

  StringCharAtGenerator* fast_case_generator() {
    return &char_at_generator_;
  }

  virtual void Generate() {
    VirtualFrameRuntimeCallHelper call_helper(frame_state());
    char_at_generator_.GenerateSlow(masm(), call_helper);

    __ bind(&need_conversion_);
    // Move smi zero into the result register, which will trigger
    // conversion.
    __ mov(result_, Operand(Smi::FromInt(0)));
    __ jmp(exit_label());

    __ bind(&index_out_of_range_);
    // When the index is out of range, the spec requires us to return
    // the empty string.
    __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
    __ jmp(exit_label());
  }

 private:
  Register result_;

  Label need_conversion_;
  Label index_out_of_range_;

  StringCharAtGenerator char_at_generator_;
};


// This generates code that performs a String.prototype.charAt() call
// or returns a smi in order to trigger conversion.
void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  Comment(masm_, "[ GenerateStringCharAt");
  ASSERT(args->length() == 2);

  Load(args->at(0));
  Load(args->at(1));

  Register index = r1;
  Register object = r2;

  frame_->EmitPop(r1);
  frame_->EmitPop(r2);

  // We need three extra registers.
  Register scratch1 = r3;
  Register scratch2 = r4;
  Register result = r0;

  DeferredStringCharAt* deferred =
      new DeferredStringCharAt(object,
                               index,
                               scratch1,
                               scratch2,
                               result);
  deferred->fast_case_generator()->GenerateFast(masm_);
  deferred->BindExit();
  frame_->EmitPush(result);
}


void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  JumpTarget answer;
  // We need the CC bits to come out as not_equal in the case where the
  // object is a smi.  This can't be done with the usual test opcode so
  // we use XOR to get the right CC bits.
  Register possible_array = frame_->PopToRegister();
  Register scratch = VirtualFrame::scratch0();
  __ and_(scratch, possible_array, Operand(kSmiTagMask));
  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
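  // A smi leaves kSmiTagMask in scratch (flags ne), so the branch below is
  // taken and the eq condition reads false at the answer label; heap
  // objects leave zero (flags eq) and fall through to the map check.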
  answer.Branch(ne);
  // It is a heap object - get the map. Check if the object is a JS array.
  __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
  answer.Bind();
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  JumpTarget answer;
  // We need the CC bits to come out as not_equal in the case where the
  // object is a smi.  This can't be done with the usual test opcode so
  // we use XOR to get the right CC bits.
  Register possible_regexp = frame_->PopToRegister();
  Register scratch = VirtualFrame::scratch0();
  __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
  answer.Branch(ne);
  // It is a heap object - get the map. Check if the object is a regexp.
  __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
  answer.Bind();
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
  // This generates a fast version of:
  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register possible_object = frame_->PopToRegister();
  __ tst(possible_object, Operand(kSmiTagMask));
  false_target()->Branch(eq);

  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(possible_object, ip);
  true_target()->Branch(eq);

  Register map_reg = VirtualFrame::scratch0();
  __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined when tested with typeof.
  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
  __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
  false_target()->Branch(ne);

  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
  __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
  false_target()->Branch(lt);
  __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
  cc_reg_ = le;
}


void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
  // This generates a fast version of:
  // (%_ClassOf(arg) === 'Function')
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register possible_function = frame_->PopToRegister();
  __ tst(possible_function, Operand(kSmiTagMask));
  false_target()->Branch(eq);
  Register map_reg = VirtualFrame::scratch0();
  Register scratch = VirtualFrame::scratch1();
  __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
  cc_reg_ = eq;
}


void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  Load(args->at(0));
  Register possible_undetectable = frame_->PopToRegister();
  __ tst(possible_undetectable, Operand(kSmiTagMask));
  false_target()->Branch(eq);
  Register scratch = VirtualFrame::scratch0();
  __ ldr(scratch,
         FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
  cc_reg_ = ne;
}


void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 0);

  Register scratch0 = VirtualFrame::scratch0();
  Register scratch1 = VirtualFrame::scratch1();
  // Get the frame pointer for the calling frame.
  __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  __ ldr(scratch1,
         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ldr(scratch0,
         MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);

  // Check the marker in the calling frame.
  __ ldr(scratch1,
         MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
  cc_reg_ = eq;
}


void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 0);

  Register tos = frame_->GetTOSRegister();
  Register scratch0 = VirtualFrame::scratch0();
  Register scratch1 = VirtualFrame::scratch1();

  // Check if the calling frame is an arguments adaptor frame.
  __ ldr(scratch0,
         MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(scratch1,
         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Get the number of formal parameters.
  __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);

  // Arguments adaptor case: Read the arguments length from the
  // adaptor frame.
  __ ldr(tos,
         MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
         eq);
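  // Exactly one of the two instructions above executes: the mov is
  // predicated on ne (no adaptor frame), the ldr on eq (adaptor frame
  // present), both using the flags from the cmp.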

  frame_->EmitPush(tos);
}


void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  ASSERT(args->length() == 1);

  // Satisfy contract with ArgumentsAccessStub:
  // Load the key into r1 and the formal parameters count into r0.
  Load(args->at(0));
  frame_->EmitPop(r1);
  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));

  // Call the shared stub to get to arguments[key].
  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
  frame_->CallStub(&stub, 0);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRandomHeapNumber(
    ZoneList<Expression*>* args) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  ASSERT(args->length() == 0);

  Label slow_allocate_heapnumber;
  Label heapnumber_allocated;

  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
  __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
  __ jmp(&heapnumber_allocated);

  __ bind(&slow_allocate_heapnumber);
  // To allocate a heap number, and ensure that it is not a smi, we
  // call the runtime function Runtime::kNumberUnaryMinus on 0, returning
  // the double -0.0.  A new, distinct heap number is returned each time.
  __ mov(r0, Operand(Smi::FromInt(0)));
  __ push(r0);
  __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
  __ mov(r4, Operand(r0));

  __ bind(&heapnumber_allocated);

  // Convert 32 random bits in r0 to 0.(32 random bits) in a double
  // by computing:
  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
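  // For example, with random bits 0x12345678 the double 0x4130000012345678
  // equals (1 + 0x12345678 / 2^52) * 2^20; subtracting 1.0 * 2^20 leaves
  // 0x12345678 * 2^-32, a value in [0, 1).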
  if (CpuFeatures::IsSupported(VFP3)) {
    __ PrepareCallCFunction(0, r1);
    __ CallCFunction(ExternalReference::random_uint32_function(), 0);

    CpuFeatures::Scope scope(VFP3);
    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
    // Create this constant using mov/orr to avoid PC relative load.
    __ mov(r1, Operand(0x41000000));
    __ orr(r1, r1, Operand(0x300000));
    // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
    __ vmov(d7, r0, r1);
    // Move 0x4130000000000000 to VFP.
    __ mov(r0, Operand(0));
    __ vmov(d8, r0, r1);
    // Subtract and store the result in the heap number.
    __ vsub(d7, d7, d8);
    __ sub(r0, r4, Operand(kHeapObjectTag));
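    // r4 is tagged; subtracting kHeapObjectTag yields the untagged address,
    // so vstr can store directly into the heap number's value field.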
    __ vstr(d7, r0, HeapNumber::kValueOffset);
    frame_->EmitPush(r4);
  } else {
    __ mov(r0, Operand(r4));
    __ PrepareCallCFunction(1, r1);
    __ CallCFunction(
        ExternalReference::fill_heap_number_with_random_function(), 1);
    frame_->EmitPush(r0);
  }
}


void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
  ASSERT_EQ(2, args->length());

  Load(args->at(0));
  Load(args->at(1));

  StringAddStub stub(NO_STRING_ADD_FLAGS);
  frame_->SpillAll();
  frame_->CallStub(&stub, 2);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
  ASSERT_EQ(3, args->length());

  Load(args->at(0));
  Load(args->at(1));
  Load(args->at(2));

  SubStringStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 3);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
  ASSERT_EQ(2, args->length());

  Load(args->at(0));
  Load(args->at(1));

  StringCompareStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 2);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
  ASSERT_EQ(4, args->length());

  Load(args->at(0));
  Load(args->at(1));
  Load(args->at(2));
  Load(args->at(3));
  RegExpExecStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 4);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
  // No stub. This code only occurs a few times in regexp.js.
  const int kMaxInlineLength = 100;
  ASSERT_EQ(3, args->length());
  Load(args->at(0));  // Size of array, smi.
  Load(args->at(1));  // "index" property value.
  Load(args->at(2));  // "input" property value.
  {
    VirtualFrame::SpilledScope spilled_scope(frame_);
    Label slowcase;
    Label done;
    __ ldr(r1, MemOperand(sp, kPointerSize * 2));
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize == 1);
    __ tst(r1, Operand(kSmiTagMask));
    __ b(ne, &slowcase);
    __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
    __ b(hi, &slowcase);
    // Smi-tagging is equivalent to multiplying by 2.
    // Allocate RegExpResult followed by FixedArray with total size (in
    // words) in r2.
    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
    // Elements:  [Map][Length][..elements..]
    // Size of JSArray with two in-object properties and the header of a
    // FixedArray.
    int objects_size =
        (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
    __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
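    // The LSR by kSmiTagSize + kSmiShiftSize (1 on 32-bit ARM) strips the
    // smi tag, leaving the untagged element count in r5.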
    __ add(r2, r5, Operand(objects_size));
    __ AllocateInNewSpace(
        r2,  // In: Size, in words.
        r0,  // Out: Start of allocation (tagged).
        r3,  // Scratch register.
        r4,  // Scratch register.
        &slowcase,
        static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
    // r0: Start of allocated area, object-tagged.
    // r1: Number of elements in array, as smi.
    // r5: Number of elements, untagged.

    // Set JSArray map to global.regexp_result_map().
    // Set empty properties FixedArray.
    // Set elements to point to FixedArray allocated right after the JSArray.
    // Interleave operations for better latency.
    __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
    __ add(r3, r0, Operand(JSRegExpResult::kSize));
    __ mov(r4, Operand(Factory::empty_fixed_array()));
    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
    __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
    __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
    __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
    __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

    // Set input, index and length fields from arguments.
    __ ldm(ia_w, sp, static_cast<RegList>(r2.bit() | r4.bit()));
    __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
    __ add(sp, sp, Operand(kPointerSize));
    __ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
    __ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset));

    // Fill out the elements FixedArray.
    // r0: JSArray, tagged.
    // r3: FixedArray, tagged.
    // r5: Number of elements in array, untagged.

    // Set map.
    __ mov(r2, Operand(Factory::fixed_array_map()));
    __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
    // Set FixedArray length.
    __ mov(r6, Operand(r5, LSL, kSmiTagSize));
    __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
    // Fill contents of fixed-array with the-hole.
    __ mov(r2, Operand(Factory::the_hole_value()));
    __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    // Fill fixed array elements with hole.
    // r0: JSArray, tagged.
    // r2: the hole.
    // r3: Start of elements in FixedArray.
    // r5: Number of elements to fill.
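    // The tst seeds the flags for the first loop iteration; afterwards the
    // SetCC sub keeps them updated, so the le branch exits once the count
    // reaches zero (or if it was zero to begin with).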
    Label loop;
    __ tst(r5, Operand(r5));
    __ bind(&loop);
    __ b(le, &done);  // Jump if r5 is negative or zero.
    __ sub(r5, r5, Operand(1), SetCC);
    __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
    __ jmp(&loop);

    __ bind(&slowcase);
    __ CallRuntime(Runtime::kRegExpConstructResult, 3);

    __ bind(&done);
  }
  frame_->Forget(3);
  frame_->EmitPush(r0);
}


class DeferredSearchCache: public DeferredCode {
 public:
  DeferredSearchCache(Register dst, Register cache, Register key)
      : dst_(dst), cache_(cache), key_(key) {
    set_comment("[ DeferredSearchCache");
  }

  virtual void Generate();

 private:
  Register dst_, cache_, key_;
};


void DeferredSearchCache::Generate() {
  __ Push(cache_, key_);
  __ CallRuntime(Runtime::kGetFromCache, 2);
  if (!dst_.is(r0)) {
    __ mov(dst_, r0);
  }
}


void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
  ASSERT_EQ(2, args->length());

  ASSERT_NE(NULL, args->at(0)->AsLiteral());
  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();

  Handle<FixedArray> jsfunction_result_caches(
      Top::global_context()->jsfunction_result_caches());
  if (jsfunction_result_caches->length() <= cache_id) {
    __ Abort("Attempt to use undefined cache.");
    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
    return;
  }

  Load(args->at(1));

  VirtualFrame::SpilledScope spilled_scope(frame_);

  frame_->EmitPop(r2);

  __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
  __ ldr(r1, ContextOperand(r1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
  __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(cache_id)));

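  // The cache (now in r1) is a FixedArray of (key, value) pairs whose
  // finger field holds the smi offset of the most recently hit key.  Only
  // that entry is probed inline; a miss goes to the deferred code.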
  DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);

  const int kFingerOffset =
      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
  // r0 now holds finger offset as a smi.
  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // r3 now points to the start of fixed array elements.
  __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
  // Note side effect of PreIndex: r3 now points to the key of the pair.
  __ cmp(r2, r0);
  deferred->Branch(ne);

  __ ldr(r0, MemOperand(r3, kPointerSize));

  deferred->BindExit();
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);

  // Load the argument on the stack and jump to the runtime.
  Load(args->at(0));

  NumberToStringStub stub;
  frame_->SpillAll();
  frame_->CallStub(&stub, 1);
  frame_->EmitPush(r0);
}


class DeferredSwapElements: public DeferredCode {
 public:
  DeferredSwapElements(Register object, Register index1, Register index2)
      : object_(object), index1_(index1), index2_(index2) {
    set_comment("[ DeferredSwapElements");
  }

  virtual void Generate();

 private:
  Register object_, index1_, index2_;
};


void DeferredSwapElements::Generate() {
  __ push(object_);
  __ push(index1_);
  __ push(index2_);
  __ CallRuntime(Runtime::kSwapElements, 3);
}


void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
  Comment cmnt(masm_, "[ GenerateSwapElements");

  ASSERT_EQ(3, args->length());

  Load(args->at(0));
  Load(args->at(1));
  Load(args->at(2));

  VirtualFrame::SpilledScope spilled_scope(frame_);

  Register index2 = r2;
  Register index1 = r1;
  Register object = r0;
  Register tmp1 = r3;
  Register tmp2 = r4;

  frame_->EmitPop(index2);
  frame_->EmitPop(index1);
  frame_->EmitPop(object);

  DeferredSwapElements* deferred =
      new DeferredSwapElements(object, index1, index2);

  // Fetch the map and check if array is in fast case.
  // Check that object doesn't require security checks and
  // has no indexed interceptor.
  __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
  deferred->Branch(lt);
  __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
  __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
  deferred->Branch(nz);

  // Check the object's elements are in fast case.
  __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(tmp2, ip);
  deferred->Branch(ne);

  // Smi-tagging is equivalent to multiplying by 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);

  // Check that both indices are smis.
  __ mov(tmp2, index1);
  __ orr(tmp2, tmp2, index2);
  __ tst(tmp2, Operand(kSmiTagMask));
  deferred->Branch(nz);

  // Convert the smi indices into offsets into the fixed array in tmp1,
  // leaving the results in index1 and index2.
  __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
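  // Shifting a smi index left by kPointerSizeLog2 - kSmiTagSize (1 on ARM)
  // turns the raw value 2 * i into 4 * i, the byte offset of element i.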

  // Swap elements.
  Register tmp3 = object;
  object = no_reg;
  __ ldr(tmp3, MemOperand(tmp1, index1));
  __ ldr(tmp2, MemOperand(tmp1, index2));
  __ str(tmp3, MemOperand(tmp1, index2));
  __ str(tmp2, MemOperand(tmp1, index1));

  Label done;
  __ InNewSpace(tmp1, tmp2, eq, &done);
  // Possible optimization: check that both values are smis
  // (OR them together and test against the smi mask).

  __ mov(tmp2, tmp1);
  RecordWriteStub recordWrite1(tmp1, index1, tmp3);
  __ CallStub(&recordWrite1);

  RecordWriteStub recordWrite2(tmp2, index2, tmp3);
  __ CallStub(&recordWrite2);

  __ bind(&done);

  deferred->BindExit();
  __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
  frame_->EmitPush(tmp1);
}


void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
  Comment cmnt(masm_, "[ GenerateCallFunction");

  ASSERT(args->length() >= 2);

  int n_args = args->length() - 2;  // for receiver and function.
  Load(args->at(0));  // receiver
  for (int i = 0; i < n_args; i++) {
    Load(args->at(i + 1));
  }
  Load(args->at(n_args + 1));  // function
  frame_->CallJSFunction(n_args);
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);
  Load(args->at(0));
  if (CpuFeatures::IsSupported(VFP3)) {
    TranscendentalCacheStub stub(TranscendentalCache::SIN);
    frame_->SpillAllButCopyTOSToR0();
    frame_->CallStub(&stub, 1);
  } else {
    frame_->CallRuntime(Runtime::kMath_sin, 1);
  }
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
  ASSERT_EQ(args->length(), 1);
  Load(args->at(0));
  if (CpuFeatures::IsSupported(VFP3)) {
    TranscendentalCacheStub stub(TranscendentalCache::COS);
    frame_->SpillAllButCopyTOSToR0();
    frame_->CallStub(&stub, 1);
  } else {
    frame_->CallRuntime(Runtime::kMath_cos, 1);
  }
  frame_->EmitPush(r0);
}


void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 2);

  // Load the two objects into registers and perform the comparison.
  Load(args->at(0));
  Load(args->at(1));
  Register lhs = frame_->PopToRegister();
  Register rhs = frame_->PopToRegister(lhs);
  __ cmp(lhs, rhs);
  cc_reg_ = eq;
}


void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  if (CheckForInlineRuntimeCall(node)) {
    ASSERT((has_cc() && frame_->height() == original_height) ||
           (!has_cc() && frame_->height() == original_height + 1));
    return;
  }

  ZoneList<Expression*>* args = node->arguments();
  Comment cmnt(masm_, "[ CallRuntime");
  Runtime::Function* function = node->function();

  if (function == NULL) {
    // Prepare stack for calling JS runtime function.
    // Push the builtins object found in the current global object.
    Register scratch = VirtualFrame::scratch0();
    __ ldr(scratch, GlobalObject());
    Register builtins = frame_->GetTOSRegister();
    __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
    frame_->EmitPush(builtins);
  }

  // Push the arguments ("left-to-right").
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
  }

  VirtualFrame::SpilledScope spilled_scope(frame_);

  if (function == NULL) {
    // Call the JS runtime function.
    __ mov(r2, Operand(node->name()));
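    // The name of the JS builtin is passed in r2; the call IC built by
    // ComputeCallInitialize below is assumed to expect it there.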
    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
    __ ldr(cp, frame_->Context());
    frame_->EmitPush(r0);
  } else {
    // Call the C runtime function.
    frame_->CallRuntime(function, arg_count);
    frame_->EmitPush(r0);
  }
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ UnaryOperation");

  Token::Value op = node->op();

  if (op == Token::NOT) {
    LoadCondition(node->expression(), false_target(), true_target(), true);
    // LoadCondition may (and usually does) leave a test and branch to
    // be emitted by the caller.  In that case, negate the condition.
    if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);

  } else if (op == Token::DELETE) {
    Property* property = node->expression()->AsProperty();
    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
    if (property != NULL) {
      Load(property->obj());
      Load(property->key());
      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
      frame_->EmitPush(r0);

    } else if (variable != NULL) {
      Slot* slot = variable->slot();
      if (variable->is_global()) {
        LoadGlobal();
        frame_->EmitPush(Operand(variable->name()));
        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
        frame_->EmitPush(r0);

      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
        // Look up the context holding the named variable.
        frame_->EmitPush(cp);
        frame_->EmitPush(Operand(variable->name()));
        frame_->CallRuntime(Runtime::kLookupContext, 2);
        // r0: context
        frame_->EmitPush(r0);
        frame_->EmitPush(Operand(variable->name()));
        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
        frame_->EmitPush(r0);

      } else {
        // Default: The result of deleting a non-global, non-dynamically
        // introduced variable is false.
        frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
      }

    } else {
      // Default: The result of deleting an expression is true.
      Load(node->expression());  // may have side-effects
      frame_->Drop();
      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
    }

  } else if (op == Token::TYPEOF) {
    // Special case for loading the typeof expression; see comment on
    // LoadTypeofExpression().
    LoadTypeofExpression(node->expression());
    frame_->CallRuntime(Runtime::kTypeof, 1);
    frame_->EmitPush(r0);  // r0 has result

  } else {
    bool overwrite =
        (node->expression()->AsBinaryOperation() != NULL &&
         node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
    Load(node->expression());
    switch (op) {
      case Token::NOT:
      case Token::DELETE:
      case Token::TYPEOF:
        UNREACHABLE();  // handled above
        break;

      case Token::SUB: {
        frame_->PopToR0();
        GenericUnaryOpStub stub(Token::SUB, overwrite);
        frame_->CallStub(&stub, 0);
        frame_->EmitPush(r0);  // r0 has result
        break;
      }

      case Token::BIT_NOT: {
        Register tos = frame_->PopToRegister();
        JumpTarget not_smi_label;
        JumpTarget continue_label;
        // Smi check.
        __ tst(tos, Operand(kSmiTagMask));
        not_smi_label.Branch(ne);

        __ mvn(tos, Operand(tos));
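        // mvn inverts every bit, including the (zero) smi tag; clearing the
        // tag bit below turns the result back into a valid smi holding the
        // bitwise complement of the original value.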
        __ bic(tos, tos, Operand(kSmiTagMask));  // Bit-clear inverted smi-tag.
        frame_->EmitPush(tos);
        // The fast case is the first to jump to the continue label, so it gets
        // to decide the virtual frame layout.
        continue_label.Jump();

        not_smi_label.Bind();
        frame_->SpillAll();
        __ Move(r0, tos);
        GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
        frame_->CallStub(&stub, 0);
        frame_->EmitPush(r0);

        continue_label.Bind();
        break;
      }

      case Token::VOID:
        frame_->Drop();
        frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
        break;

      case Token::ADD: {
        Register tos = frame_->Peek();
        // Smi check.
        JumpTarget continue_label;
        __ tst(tos, Operand(kSmiTagMask));
        continue_label.Branch(eq);

        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
        frame_->EmitPush(r0);

        continue_label.Bind();
        break;
      }
      default:
        UNREACHABLE();
    }
  }
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::VisitCountOperation(CountOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ CountOperation");
  VirtualFrame::RegisterAllocationScope scope(this);

  bool is_postfix = node->is_postfix();
  bool is_increment = node->op() == Token::INC;

  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
  bool is_const = (var != NULL && var->mode() == Variable::CONST);
  bool is_slot = (var != NULL && var->mode() == Variable::VAR);

  if (!is_const && is_slot && type_info(var->slot()).IsSmi()) {
    // The type info declares that this variable is always a Smi.  That
    // means it is a Smi both before and after the increment/decrement.
    // Let's make use of that to generate a very minimal count operation.
    Reference target(this, node->expression(), !is_const);
    ASSERT(!target.is_illegal());
    target.GetValue();  // Pushes the value.
    Register value = frame_->PopToRegister();
    if (is_postfix) frame_->EmitPush(value);
    if (is_increment) {
      __ add(value, value, Operand(Smi::FromInt(1)));
    } else {
      __ sub(value, value, Operand(Smi::FromInt(1)));
    }
    frame_->EmitPush(value);
    target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
    if (is_postfix) frame_->Pop();
    ASSERT_EQ(original_height + 1, frame_->height());
    return;
  }

  // If it's a postfix expression and its result is not ignored and the
  // reference is non-trivial, then push a placeholder on the stack now
  // to hold the result of the expression.
  bool placeholder_pushed = false;
  if (!is_slot && is_postfix) {
    frame_->EmitPush(Operand(Smi::FromInt(0)));
    placeholder_pushed = true;
  }

  // A constant reference is never written to, so it is not treated as a
  // compound assignment reference.
  { Reference target(this, node->expression(), !is_const);
    if (target.is_illegal()) {
      // Spoof the virtual frame to have the expected height (one higher
      // than on entry).
      if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
      ASSERT_EQ(original_height + 1, frame_->height());
      return;
    }

    // This pushes 0, 1 or 2 words describing the target on the stack, to be
    // used later when updating the target.  It also pushes the current value
    // of the target.
    target.GetValue();

    JumpTarget slow;
    JumpTarget exit;

    Register value = frame_->PopToRegister();

    // Postfix: Store the old value as the result.
    if (placeholder_pushed) {
      frame_->SetElementAt(value, target.size());
    } else if (is_postfix) {
      frame_->EmitPush(value);
      __ mov(VirtualFrame::scratch0(), value);
      value = VirtualFrame::scratch0();
    }

    // Check for smi operand.
    __ tst(value, Operand(kSmiTagMask));
    slow.Branch(ne);

    // Perform optimistic increment/decrement.
    if (is_increment) {
      __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
    } else {
      __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
    }
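    // Adding or subtracting Smi::FromInt(1) changes the raw word by 2; the
    // V flag is set exactly when the result leaves smi range, which the vc
    // branch below tests.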

    // If the increment/decrement didn't overflow, we're done.
    exit.Branch(vc);

    // Revert optimistic increment/decrement.
    if (is_increment) {
      __ sub(value, value, Operand(Smi::FromInt(1)));
    } else {
      __ add(value, value, Operand(Smi::FromInt(1)));
    }

    // Slow case: Convert to number.  At this point the
    // value to be incremented is in the value register.
    slow.Bind();

    // Convert the operand to a number.
    frame_->EmitPush(value);

    {
      VirtualFrame::SpilledScope spilled(frame_);
      frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);

      if (is_postfix) {
        // Postfix: store to result (on the stack).
        __ str(r0, frame_->ElementAt(target.size()));
      }

      // Compute the new value.
      frame_->EmitPush(r0);
      frame_->EmitPush(Operand(Smi::FromInt(1)));
      if (is_increment) {
        frame_->CallRuntime(Runtime::kNumberAdd, 2);
      } else {
        frame_->CallRuntime(Runtime::kNumberSub, 2);
      }
    }

    __ Move(value, r0);
    // Store the new value in the target if not const.
    // At this point the answer is in the value register.
    exit.Bind();
    frame_->EmitPush(value);
    // Set the target with the result, leaving the result on
    // top of the stack.  Removes the target from the stack if
    // it has a non-zero size.
    if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
  }

  // Postfix: Discard the new value and use the old.
  if (is_postfix) frame_->Pop();
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
  // According to ECMA-262 section 11.11, page 58, the binary logical
  // operators must yield the result of one of the two expressions
  // before any ToBoolean() conversions. This means that the value
  // produced by a && or || operator is not necessarily a boolean.

  // NOTE: If the left hand side produces a materialized value (not in
  // the CC register), we force the right hand side to do the
  // same. This is necessary because we may have to branch to the exit
  // after evaluating the left hand side (due to the shortcut
  // semantics), but the compiler must (statically) know if the result
  // of compiling the binary operation is materialized or not.
  if (node->op() == Token::AND) {
    JumpTarget is_true;
    LoadCondition(node->left(), &is_true, false_target(), false);
    if (has_valid_frame() && !has_cc()) {
      // The left-hand side result is on top of the virtual frame.
      JumpTarget pop_and_continue;
      JumpTarget exit;

      frame_->Dup();
      // Avoid popping the result if it converts to 'false' using the
      // standard ToBoolean() conversion as described in ECMA-262,
      // section 9.2, page 30.
      ToBoolean(&pop_and_continue, &exit);
      Branch(false, &exit);

      // Pop the result of evaluating the first part.
      pop_and_continue.Bind();
      frame_->Pop();

      // Evaluate right side expression.
      is_true.Bind();
      Load(node->right());

      // Exit (always with a materialized value).
      exit.Bind();
    } else if (has_cc() || is_true.is_linked()) {
      // The left-hand side is either (a) partially compiled to
      // control flow with a final branch left to emit or (b) fully
      // compiled to control flow and possibly true.
      if (has_cc()) {
        Branch(false, false_target());
      }
      is_true.Bind();
      LoadCondition(node->right(), true_target(), false_target(), false);
    } else {
      // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
    }

  } else {
    ASSERT(node->op() == Token::OR);
    JumpTarget is_false;
    LoadCondition(node->left(), true_target(), &is_false, false);
    if (has_valid_frame() && !has_cc()) {
      // The left-hand side result is on top of the virtual frame.
      JumpTarget pop_and_continue;
      JumpTarget exit;

      frame_->Dup();
      // Avoid popping the result if it converts to 'true' using the
      // standard ToBoolean() conversion as described in ECMA-262,
      // section 9.2, page 30.
      ToBoolean(&exit, &pop_and_continue);
      Branch(true, &exit);

      // Pop the result of evaluating the first part.
      pop_and_continue.Bind();
      frame_->Pop();

      // Evaluate right side expression.
      is_false.Bind();
      Load(node->right());

      // Exit (always with a materialized value).
      exit.Bind();
    } else if (has_cc() || is_false.is_linked()) {
      // The left-hand side is either (a) partially compiled to
      // control flow with a final branch left to emit or (b) fully
      // compiled to control flow and possibly false.
      if (has_cc()) {
        Branch(true, true_target());
      }
      is_false.Bind();
      LoadCondition(node->right(), true_target(), false_target(), false);
    } else {
      // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
    }
  }
}


void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ BinaryOperation");

  if (node->op() == Token::AND || node->op() == Token::OR) {
    GenerateLogicalBooleanOperation(node);
  } else {
    // Optimize for the case where (at least) one of the expressions
    // is a literal small integer.
    Literal* lliteral = node->left()->AsLiteral();
    Literal* rliteral = node->right()->AsLiteral();
    // NOTE: The code below assumes that the slow cases (calls to runtime)
    // never return a constant/immutable object.
    bool overwrite_left =
        (node->left()->AsBinaryOperation() != NULL &&
         node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
    bool overwrite_right =
        (node->right()->AsBinaryOperation() != NULL &&
         node->right()->AsBinaryOperation()->ResultOverwriteAllowed());

    if (rliteral != NULL && rliteral->handle()->IsSmi()) {
      VirtualFrame::RegisterAllocationScope scope(this);
      Load(node->left());
      if (frame_->KnownSmiAt(0)) overwrite_left = false;
      SmiOperation(node->op(),
                   rliteral->handle(),
                   false,
                   overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
    } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
      VirtualFrame::RegisterAllocationScope scope(this);
      Load(node->right());
      if (frame_->KnownSmiAt(0)) overwrite_right = false;
      SmiOperation(node->op(),
                   lliteral->handle(),
                   true,
                   overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
    } else {
      GenerateInlineSmi inline_smi =
          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
      if (lliteral != NULL) {
        ASSERT(!lliteral->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      if (rliteral != NULL) {
        ASSERT(!rliteral->handle()->IsSmi());
        inline_smi = DONT_GENERATE_INLINE_SMI;
      }
      VirtualFrame::RegisterAllocationScope scope(this);
      OverwriteMode overwrite_mode = NO_OVERWRITE;
      if (overwrite_left) {
        overwrite_mode = OVERWRITE_LEFT;
      } else if (overwrite_right) {
        overwrite_mode = OVERWRITE_RIGHT;
      }
      Load(node->left());
      Load(node->right());
      GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
    }
  }
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::VisitThisFunction(ThisFunction* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  frame_->EmitPush(MemOperand(frame_->Function()));
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ CompareOperation");

  VirtualFrame::RegisterAllocationScope nonspilled_scope(this);

  // Get the expressions from the node.
  Expression* left = node->left();
  Expression* right = node->right();
  Token::Value op = node->op();

  // To make null checks efficient, we check if either left or right is the
  // literal 'null'. If so, we optimize the code by inlining a null check
  // instead of calling the (very) general runtime routine for checking
  // equality.
  if (op == Token::EQ || op == Token::EQ_STRICT) {
    bool left_is_null =
        left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
    bool right_is_null =
        right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
    // The 'null' value can only be equal to 'null' or 'undefined'.
    if (left_is_null || right_is_null) {
      Load(left_is_null ? right : left);
      Register tos = frame_->PopToRegister();
      __ LoadRoot(ip, Heap::kNullValueRootIndex);
      __ cmp(tos, ip);

      // The 'null' value is only equal to 'undefined' if using non-strict
      // comparisons.
      if (op != Token::EQ_STRICT) {
        true_target()->Branch(eq);

        __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
        __ cmp(tos, Operand(ip));
        true_target()->Branch(eq);

        __ tst(tos, Operand(kSmiTagMask));
        false_target()->Branch(eq);

        // It can be an undetectable object.
        __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
        __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
        __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
        __ cmp(tos, Operand(1 << Map::kIsUndetectable));
      }

      cc_reg_ = eq;
      ASSERT(has_cc() && frame_->height() == original_height);
      return;
    }
  }

  // To make typeof testing for natives implemented in JavaScript really
  // efficient, we generate special code for expressions of the form:
  // 'typeof <expression> == <string>'.
  UnaryOperation* operation = left->AsUnaryOperation();
  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
      (operation != NULL && operation->op() == Token::TYPEOF) &&
      (right->AsLiteral() != NULL &&
       right->AsLiteral()->handle()->IsString())) {
    Handle<String> check(String::cast(*right->AsLiteral()->handle()));

    // Load the operand, move it to a register.
    LoadTypeofExpression(operation->expression());
    Register tos = frame_->PopToRegister();

    Register scratch = VirtualFrame::scratch0();

    if (check->Equals(Heap::number_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      true_target()->Branch(eq);
      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
      __ cmp(tos, ip);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::string_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));

      // It can be an undetectable string object.
      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
      false_target()->Branch(eq);

      __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
      __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
      cc_reg_ = lt;

    } else if (check->Equals(Heap::boolean_symbol())) {
      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
      __ cmp(tos, ip);
      true_target()->Branch(eq);
      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
      __ cmp(tos, ip);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::undefined_symbol())) {
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(tos, ip);
      true_target()->Branch(eq);

      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      // It can be an undetectable object.
      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));

      cc_reg_ = eq;

    } else if (check->Equals(Heap::function_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);
      Register map_reg = scratch;
      __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
      true_target()->Branch(eq);
      // Regular expressions are callable so typeof == 'function'.
      __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
      cc_reg_ = eq;

    } else if (check->Equals(Heap::object_symbol())) {
      __ tst(tos, Operand(kSmiTagMask));
      false_target()->Branch(eq);

      __ LoadRoot(ip, Heap::kNullValueRootIndex);
      __ cmp(tos, ip);
      true_target()->Branch(eq);

      Register map_reg = scratch;
      __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
      false_target()->Branch(eq);

      // It can be an undetectable object.
      __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
      __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
      __ cmp(tos, Operand(1 << Map::kIsUndetectable));
      false_target()->Branch(eq);

      __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
      __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
      false_target()->Branch(lt);
      __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
      cc_reg_ = le;

    } else {
      // Uncommon case: typeof testing against a string literal that is
      // never returned from the typeof operator.
      false_target()->Jump();
    }
    ASSERT(!has_valid_frame() ||
           (has_cc() && frame_->height() == original_height));
    return;
  }

  switch (op) {
    case Token::EQ:
      Comparison(eq, left, right, false);
      break;

    case Token::LT:
      Comparison(lt, left, right);
      break;

    case Token::GT:
      Comparison(gt, left, right);
      break;

    case Token::LTE:
      Comparison(le, left, right);
      break;

    case Token::GTE:
      Comparison(ge, left, right);
      break;

    case Token::EQ_STRICT:
      Comparison(eq, left, right, true);
      break;

    case Token::IN: {
      Load(left);
      Load(right);
      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
      frame_->EmitPush(r0);
      break;
    }

    case Token::INSTANCEOF: {
      Load(left);
      Load(right);
      InstanceofStub stub;
      frame_->CallStub(&stub, 2);
      // At this point if instanceof succeeded then r0 == 0.
      __ tst(r0, Operand(r0));
      cc_reg_ = eq;
      break;
    }

    default:
      UNREACHABLE();
  }
  ASSERT((has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


class DeferredReferenceGetNamedValue: public DeferredCode {
 public:
  explicit DeferredReferenceGetNamedValue(Register receiver,
                                          Handle<String> name)
      : receiver_(receiver), name_(name) {
    set_comment("[ DeferredReferenceGetNamedValue");
  }

  virtual void Generate();

 private:
  Register receiver_;
  Handle<String> name_;
};


// Convention for this is that on entry the receiver is in a register that
// is not used by the virtual frame.  On exit the answer is found in that
// same register and the stack has the same height.
void DeferredReferenceGetNamedValue::Generate() {
#ifdef DEBUG
  int expected_height = frame_state()->frame()->height();
#endif
  VirtualFrame copied_frame(*frame_state()->frame());
  copied_frame.SpillAll();

  Register scratch1 = VirtualFrame::scratch0();
  Register scratch2 = VirtualFrame::scratch1();
  ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
  __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
  __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);

  // Ensure receiver in r0 and name in r2 to match load ic calling convention.
  __ Move(r0, receiver_);
  __ mov(r2, Operand(name_));

  // The rest of the instructions in the deferred code must be together.
  { Assembler::BlockConstPoolScope block_const_pool(masm_);
    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
    __ Call(ic, RelocInfo::CODE_TARGET);
    // The call must be followed by a nop(1) instruction to indicate that the
    // in-object property load has been inlined.
    __ nop(PROPERTY_ACCESS_INLINED);

    // At this point the answer is in r0.  We move it to the expected register
    // if necessary.
    __ Move(receiver_, r0);

    // Now go back to the frame that we entered with.  This will not overwrite
    // the receiver register since that register was not in use when we came
    // in.  The instructions emitted by this merge are skipped over by the
    // inline load patching mechanism when looking for the branch instruction
    // that tells it where the code to patch is.
    copied_frame.MergeTo(frame_state()->frame());

    // Block the constant pool for one more instruction after leaving this
    // constant pool block scope to include the branch instruction ending the
    // deferred code.
    __ BlockConstPoolFor(1);
  }
  ASSERT_EQ(expected_height, frame_state()->frame()->height());
}


class DeferredReferenceGetKeyedValue: public DeferredCode {
 public:
  DeferredReferenceGetKeyedValue(Register key, Register receiver)
      : key_(key), receiver_(receiver) {
    set_comment("[ DeferredReferenceGetKeyedValue");
  }

  virtual void Generate();

 private:
  Register key_;
  Register receiver_;
};


// Takes the key and the receiver in r0 and r1 or vice versa.  Returns the
// result in r0.
6101void DeferredReferenceGetKeyedValue::Generate() {
6102  ASSERT((key_.is(r0) && receiver_.is(r1)) ||
6103         (key_.is(r1) && receiver_.is(r0)));
6104
6105  VirtualFrame copied_frame(*frame_state()->frame());
6106  copied_frame.SpillAll();
6107
6108  Register scratch1 = VirtualFrame::scratch0();
6109  Register scratch2 = VirtualFrame::scratch1();
6110  __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
6111  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
6112
6113  // Ensure key in r0 and receiver in r1 to match keyed load ic calling
6114  // convention.
6115  if (key_.is(r1)) {
6116    __ Swap(r0, r1, ip);
6117  }
6118
6119  // The rest of the instructions in the deferred code must be together.
6120  { Assembler::BlockConstPoolScope block_const_pool(masm_);
6121    // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
6122    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
6123    __ Call(ic, RelocInfo::CODE_TARGET);
6124    // The call must be followed by a nop instruction to indicate that the
6125    // keyed load has been inlined.
6126    __ nop(PROPERTY_ACCESS_INLINED);
6127
6128    // Now go back to the frame that we entered with.  This will not overwrite
6129    // the receiver or key registers since they were not in use when we came
6130    // in.  The instructions emitted by this merge are skipped over by the
6131    // inline load patching mechanism when looking for the branch instruction
6132    // that tells it where the code to patch is.
6133    copied_frame.MergeTo(frame_state()->frame());
6134
6135    // Block the constant pool for one more instruction after leaving this
6136    // constant pool block scope to include the branch instruction ending the
6137    // deferred code.
6138    __ BlockConstPoolFor(1);
6139  }
6140}
6141
6142
6143class DeferredReferenceSetKeyedValue: public DeferredCode {
6144 public:
6145  DeferredReferenceSetKeyedValue(Register value,
6146                                 Register key,
6147                                 Register receiver)
6148      : value_(value), key_(key), receiver_(receiver) {
6149    set_comment("[ DeferredReferenceSetKeyedValue");
6150  }
6151
6152  virtual void Generate();
6153
6154 private:
6155  Register value_;
6156  Register key_;
6157  Register receiver_;
6158};
6159
6160
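// On entry the value and key are in r0 and r1 (in either order) and the
// receiver is in r2.  The keyed store IC leaves the result in r0.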
6161void DeferredReferenceSetKeyedValue::Generate() {
6162  Register scratch1 = VirtualFrame::scratch0();
6163  Register scratch2 = VirtualFrame::scratch1();
6164  __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
6165  __ IncrementCounter(
6166      &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
6167
6168  // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
6169  // calling convention.
6170  if (value_.is(r1)) {
6171    __ Swap(r0, r1, ip);
6172  }
6173  ASSERT(receiver_.is(r2));
6174
6175  // The rest of the instructions in the deferred code must be together.
6176  { Assembler::BlockConstPoolScope block_const_pool(masm_);
6177    // Call keyed store IC. It has the arguments value, key and receiver in r0,
6178    // r1 and r2.
6179    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
6180    __ Call(ic, RelocInfo::CODE_TARGET);
6181    // The call must be followed by a nop instruction to indicate that the
6182    // keyed store has been inlined.
6183    __ nop(PROPERTY_ACCESS_INLINED);
6184
6185    // Block the constant pool for one more instruction after leaving this
6186    // constant pool block scope to include the branch instruction ending the
6187    // deferred code.
6188    __ BlockConstPoolFor(1);
6189  }
6190}
6191
6192
6193// Consumes the top of stack (the receiver) and pushes the result instead.
6194void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
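  // Contextual loads, loads in the global scope and loads outside a loop
  // all go through the load IC; only a non-contextual load inside a loop
  // in a non-global scope uses the inlined in-object fast case below.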
6195  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
6196    Comment cmnt(masm(), "[ Load from named Property");
6197    // Setup the name register and call load IC.
6198    frame_->CallLoadIC(name,
6199                       is_contextual
6200                           ? RelocInfo::CODE_TARGET_CONTEXT
6201                           : RelocInfo::CODE_TARGET);
6202    frame_->EmitPush(r0);  // Push answer.
6203  } else {
6204    // Inline the in-object property case.
6205    Comment cmnt(masm(), "[ Inlined named property load");
6206
6207    // Counter will be decremented in the deferred code. Placed here to avoid
6208    // having it in the instruction stream below where patching will occur.
6209    __ IncrementCounter(&Counters::named_load_inline, 1,
6210                        frame_->scratch0(), frame_->scratch1());
6211
6212    // The following instructions are the inlined load of an in-object property.
    // Parts of this code are patched, so the exact instructions generated
    // need to be fixed.  Therefore the constant pool is blocked while
    // generating this code.
6216
6217    // Load the receiver from the stack.
6218    Register receiver = frame_->PopToRegister();
6219
6220    DeferredReferenceGetNamedValue* deferred =
6221        new DeferredReferenceGetNamedValue(receiver, name);
6222
6223#ifdef DEBUG
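    // The patchable code emitted inside the BlockConstPoolScope below must
    // be exactly this many instructions long; the ASSERT_EQ at the end of
    // the scope checks this.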
6224    int kInlinedNamedLoadInstructions = 7;
6225    Label check_inlined_codesize;
6226    masm_->bind(&check_inlined_codesize);
6227#endif
6228
6229    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6230      // Check that the receiver is a heap object.
6231      __ tst(receiver, Operand(kSmiTagMask));
6232      deferred->Branch(eq);
6233
6234      Register scratch = VirtualFrame::scratch0();
6235      Register scratch2 = VirtualFrame::scratch1();
6236
6237      // Check the map. The null map used below is patched by the inline cache
6238      // code.  Therefore we can't use a LoadRoot call.
6239      __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
6240      __ mov(scratch2, Operand(Factory::null_value()));
6241      __ cmp(scratch, scratch2);
6242      deferred->Branch(ne);
6243
6244      // Initially use an invalid index. The index will be patched by the
6245      // inline cache code.
6246      __ ldr(receiver, MemOperand(receiver, 0));
6247
      // Make sure that the expected number of instructions is generated.
6249      ASSERT_EQ(kInlinedNamedLoadInstructions,
6250                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
6251    }
6252
6253    deferred->BindExit();
6254    // At this point the receiver register has the result, either from the
6255    // deferred code or from the inlined code.
6256    frame_->EmitPush(receiver);
6257  }
6258}
6259
6260
6261void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
6262#ifdef DEBUG
6263  int expected_height = frame_->height() - (is_contextual ? 1 : 2);
6264#endif
6265  frame_->CallStoreIC(name, is_contextual);
6266
6267  ASSERT_EQ(expected_height, frame_->height());
6268}
6269
6270
6271void CodeGenerator::EmitKeyedLoad() {
6272  if (loop_nesting() == 0) {
6273    Comment cmnt(masm_, "[ Load from keyed property");
6274    frame_->CallKeyedLoadIC();
6275  } else {
6276    // Inline the keyed load.
6277    Comment cmnt(masm_, "[ Inlined load from keyed property");
6278
6279    // Counter will be decremented in the deferred code. Placed here to avoid
6280    // having it in the instruction stream below where patching will occur.
6281    __ IncrementCounter(&Counters::keyed_load_inline, 1,
6282                        frame_->scratch0(), frame_->scratch1());
6283
6284    // Load the key and receiver from the stack.
6285    bool key_is_known_smi = frame_->KnownSmiAt(0);
6286    Register key = frame_->PopToRegister();
6287    Register receiver = frame_->PopToRegister(key);
6288
6289    // The deferred code expects key and receiver in registers.
6290    DeferredReferenceGetKeyedValue* deferred =
6291        new DeferredReferenceGetKeyedValue(key, receiver);
6292
6293    // Check that the receiver is a heap object.
6294    __ tst(receiver, Operand(kSmiTagMask));
6295    deferred->Branch(eq);
6296
    // The following instructions are part of the inlined keyed load code
    // which can be patched.  Therefore the exact number of instructions
    // generated needs to be fixed, so the constant pool is blocked while
    // generating this code.
6301    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6302      Register scratch1 = VirtualFrame::scratch0();
6303      Register scratch2 = VirtualFrame::scratch1();
6304      // Check the map. The null map used below is patched by the inline cache
6305      // code.
6306      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
6307
6308      // Check that the key is a smi.
6309      if (!key_is_known_smi) {
6310        __ tst(key, Operand(kSmiTagMask));
6311        deferred->Branch(ne);
6312      }
6313
6314#ifdef DEBUG
6315      Label check_inlined_codesize;
6316      masm_->bind(&check_inlined_codesize);
6317#endif
6318      __ mov(scratch2, Operand(Factory::null_value()));
6319      __ cmp(scratch1, scratch2);
6320      deferred->Branch(ne);
6321
6322      // Get the elements array from the receiver and check that it
6323      // is not a dictionary.
6324      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
6325      if (FLAG_debug_code) {
6326        __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
6327        __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
6328        __ cmp(scratch2, ip);
6329        __ Assert(eq, "JSObject with fast elements map has slow elements");
6330      }
6331
6332      // Check that key is within bounds. Use unsigned comparison to handle
6333      // negative keys.
6334      __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
6335      __ cmp(scratch2, key);
6336      deferred->Branch(ls);  // Unsigned less equal.
6337
6338      // Load and check that the result is not the hole (key is a smi).
6339      __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
6340      __ add(scratch1,
6341             scratch1,
6342             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
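      // The key is a smi, i.e. the element index shifted left by the tag
      // size, so only one more shift is needed to scale it to a byte offset
      // into the word-sized elements.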
6343      __ ldr(scratch1,
6344             MemOperand(scratch1, key, LSL,
6345                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
6346      __ cmp(scratch1, scratch2);
6347      deferred->Branch(eq);
6348
6349      __ mov(r0, scratch1);
      // Make sure that the expected number of instructions is generated.
6351      ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
6352                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
6353    }
6354
6355    deferred->BindExit();
6356  }
6357}
6358
6359
6360void CodeGenerator::EmitKeyedStore(StaticType* key_type,
6361                                   WriteBarrierCharacter wb_info) {
6362  // Generate inlined version of the keyed store if the code is in a loop
6363  // and the key is likely to be a smi.
6364  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
6365    // Inline the keyed store.
6366    Comment cmnt(masm_, "[ Inlined store to keyed property");
6367
6368    Register scratch1 = VirtualFrame::scratch0();
6369    Register scratch2 = VirtualFrame::scratch1();
6370    Register scratch3 = r3;
6371
6372    // Counter will be decremented in the deferred code. Placed here to avoid
6373    // having it in the instruction stream below where patching will occur.
6374    __ IncrementCounter(&Counters::keyed_store_inline, 1,
6375                        scratch1, scratch2);
6379    // Load the value, key and receiver from the stack.
6380    bool value_is_harmless = frame_->KnownSmiAt(0);
6381    if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
6382    bool key_is_smi = frame_->KnownSmiAt(1);
6383    Register value = frame_->PopToRegister();
6384    Register key = frame_->PopToRegister(value);
6385    VirtualFrame::SpilledScope spilled(frame_);
6386    Register receiver = r2;
6387    frame_->EmitPop(receiver);
6388
6389#ifdef DEBUG
6390    bool we_remembered_the_write_barrier = value_is_harmless;
6391#endif
6392
6393    // The deferred code expects value, key and receiver in registers.
6394    DeferredReferenceSetKeyedValue* deferred =
6395        new DeferredReferenceSetKeyedValue(value, key, receiver);
6396
6397    // Check that the value is a smi. As this inlined code does not set the
6398    // write barrier it is only possible to store smi values.
6399    if (!value_is_harmless) {
6400      // If the value is not likely to be a Smi then let's test the fixed array
6401      // for new space instead.  See below.
6402      if (wb_info == LIKELY_SMI) {
6403        __ tst(value, Operand(kSmiTagMask));
6404        deferred->Branch(ne);
6405#ifdef DEBUG
6406        we_remembered_the_write_barrier = true;
6407#endif
6408      }
6409    }
6410
6411    if (!key_is_smi) {
6412      // Check that the key is a smi.
6413      __ tst(key, Operand(kSmiTagMask));
6414      deferred->Branch(ne);
6415    }
6416
6417    // Check that the receiver is a heap object.
6418    __ tst(receiver, Operand(kSmiTagMask));
6419    deferred->Branch(eq);
6420
6421    // Check that the receiver is a JSArray.
6422    __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
6423    deferred->Branch(ne);
6424
6425    // Check that the key is within bounds. Both the key and the length of
6426    // the JSArray are smis. Use unsigned comparison to handle negative keys.
6427    __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
6428    __ cmp(scratch1, key);
6429    deferred->Branch(ls);  // Unsigned less equal.
6430
6431    // Get the elements array from the receiver.
6432    __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
6433    if (!value_is_harmless && wb_info != LIKELY_SMI) {
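      // Writing a smi never needs a write barrier, and neither does a write
      // into an elements array that is in new space.  The smi test of the
      // value is predicated on ne so that it only executes when the elements
      // array lies outside new space; we hit the deferred code only if both
      // checks fail.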
6435      __ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask()));
6436      __ cmp(scratch2, Operand(ExternalReference::new_space_start()));
6437      __ tst(value, Operand(kSmiTagMask), ne);
6438      deferred->Branch(ne);
6439#ifdef DEBUG
6440      we_remembered_the_write_barrier = true;
6441#endif
6442    }
6443    // Check that the elements array is not a dictionary.
6444    __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
    // The following instructions are part of the inlined keyed store code
    // which can be patched.  Therefore the exact number of instructions
    // generated needs to be fixed, so the constant pool is blocked while
    // generating this code.
6449    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6450#ifdef DEBUG
6451      Label check_inlined_codesize;
6452      masm_->bind(&check_inlined_codesize);
6453#endif
6454
6455      // Read the fixed array map from the constant pool (not from the root
6456      // array) so that the value can be patched.  When debugging, we patch this
6457      // comparison to always fail so that we will hit the IC call in the
6458      // deferred code which will allow the debugger to break for fast case
6459      // stores.
6460      __ mov(scratch3, Operand(Factory::fixed_array_map()));
6461      __ cmp(scratch2, scratch3);
6462      deferred->Branch(ne);
6463
6464      // Store the value.
6465      __ add(scratch1, scratch1,
6466             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
6467      __ str(value,
6468             MemOperand(scratch1, key, LSL,
6469                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
6470
      // Make sure that the expected number of instructions is generated.
6472      ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
6473                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
6474    }
6475
6476    ASSERT(we_remembered_the_write_barrier);
6477
6478    deferred->BindExit();
6479  } else {
6480    frame()->CallKeyedStoreIC();
6481  }
6482}
6483
6484
6485#ifdef DEBUG
6486bool CodeGenerator::HasValidEntryRegisters() { return true; }
6487#endif
6488
6489
6490#undef __
6491#define __ ACCESS_MASM(masm)
6492
6493Handle<String> Reference::GetName() {
6494  ASSERT(type_ == NAMED);
6495  Property* property = expression_->AsProperty();
6496  if (property == NULL) {
6497    // Global variable reference treated as a named property reference.
6498    VariableProxy* proxy = expression_->AsVariableProxy();
6499    ASSERT(proxy->AsVariable() != NULL);
6500    ASSERT(proxy->AsVariable()->is_global());
6501    return proxy->name();
6502  } else {
6503    Literal* raw_name = property->key()->AsLiteral();
6504    ASSERT(raw_name != NULL);
6505    return Handle<String>(String::cast(*raw_name->handle()));
6506  }
6507}
6508
6509
6510void Reference::DupIfPersist() {
6511  if (persist_after_get_) {
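    // A keyed reference occupies two stack slots (receiver and key), a
    // named reference one (the receiver) and a slot reference none;
    // duplicate whatever the upcoming load will consume so that the
    // reference survives the get.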
6512    switch (type_) {
6513      case KEYED:
6514        cgen_->frame()->Dup2();
6515        break;
6516      case NAMED:
6517        cgen_->frame()->Dup();
6518        // Fall through.
6519      case UNLOADED:
6520      case ILLEGAL:
6521      case SLOT:
6522        // Do nothing.
6523        ;
6524    }
6525  } else {
6526    set_unloaded();
6527  }
6528}
6529
6530
6531void Reference::GetValue() {
6532  ASSERT(cgen_->HasValidEntryRegisters());
6533  ASSERT(!is_illegal());
6534  ASSERT(!cgen_->has_cc());
6535  MacroAssembler* masm = cgen_->masm();
6536  Property* property = expression_->AsProperty();
6537  if (property != NULL) {
6538    cgen_->CodeForSourcePosition(property->position());
6539  }
6540
6541  switch (type_) {
6542    case SLOT: {
6543      Comment cmnt(masm, "[ Load from Slot");
6544      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6545      ASSERT(slot != NULL);
6546      DupIfPersist();
6547      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
6548      break;
6549    }
6550
6551    case NAMED: {
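      // A named reference whose expression is a variable proxy is a global
      // variable access, which is loaded contextually (see EmitNamedLoad).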
6552      Variable* var = expression_->AsVariableProxy()->AsVariable();
6553      bool is_global = var != NULL;
6554      ASSERT(!is_global || var->is_global());
6555      Handle<String> name = GetName();
6556      DupIfPersist();
6557      cgen_->EmitNamedLoad(name, is_global);
6558      break;
6559    }
6560
6561    case KEYED: {
6562      ASSERT(property != NULL);
6563      DupIfPersist();
6564      cgen_->EmitKeyedLoad();
6565      cgen_->frame()->EmitPush(r0);
6566      break;
6567    }
6568
6569    default:
6570      UNREACHABLE();
6571  }
6572}
6573
6574
6575void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
6576  ASSERT(!is_illegal());
6577  ASSERT(!cgen_->has_cc());
6578  MacroAssembler* masm = cgen_->masm();
6579  VirtualFrame* frame = cgen_->frame();
6580  Property* property = expression_->AsProperty();
6581  if (property != NULL) {
6582    cgen_->CodeForSourcePosition(property->position());
6583  }
6584
6585  switch (type_) {
6586    case SLOT: {
6587      Comment cmnt(masm, "[ Store to Slot");
6588      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6589      cgen_->StoreToSlot(slot, init_state);
6590      set_unloaded();
6591      break;
6592    }
6593
6594    case NAMED: {
6595      Comment cmnt(masm, "[ Store to named Property");
6596      cgen_->EmitNamedStore(GetName(), false);
6597      frame->EmitPush(r0);
6598      set_unloaded();
6599      break;
6600    }
6601
6602    case KEYED: {
6603      Comment cmnt(masm, "[ Store to keyed Property");
6604      Property* property = expression_->AsProperty();
6605      ASSERT(property != NULL);
6606      cgen_->CodeForSourcePosition(property->position());
6607      cgen_->EmitKeyedStore(property->key()->type(), wb_info);
6608      frame->EmitPush(r0);
6609      set_unloaded();
6610      break;
6611    }
6612
6613    default:
6614      UNREACHABLE();
6615  }
6616}
6617
6618
6619void FastNewClosureStub::Generate(MacroAssembler* masm) {
6620  // Create a new closure from the given function info in new
6621  // space. Set the context to the current context in cp.
6622  Label gc;
6623
6624  // Pop the function info from the stack.
6625  __ pop(r3);
6626
6627  // Attempt to allocate new JSFunction in new space.
6628  __ AllocateInNewSpace(JSFunction::kSize,
6629                        r0,
6630                        r1,
6631                        r2,
6632                        &gc,
6633                        TAG_OBJECT);
6634
6635  // Compute the function map in the current global context and set that
6636  // as the map of the allocated object.
6637  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
6638  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
6639  __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
6640  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
6641
6642  // Initialize the rest of the function. We don't have to update the
6643  // write barrier because the allocated object is in new space.
6644  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
6645  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
6646  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
6647  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
6648  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
6649  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
6650  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
6651  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
6652
6653  // Return result. The argument function info has been popped already.
6654  __ Ret();
6655
6656  // Create a new closure through the slower runtime call.
6657  __ bind(&gc);
6658  __ Push(cp, r3);
6659  __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
6660}
6661
6662
6663void FastNewContextStub::Generate(MacroAssembler* masm) {
6664  // Try to allocate the context in new space.
6665  Label gc;
6666  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
6667
6668  // Attempt to allocate the context in new space.
6669  __ AllocateInNewSpace(FixedArray::SizeFor(length),
6670                        r0,
6671                        r1,
6672                        r2,
6673                        &gc,
6674                        TAG_OBJECT);
6675
6676  // Load the function from the stack.
6677  __ ldr(r3, MemOperand(sp, 0));
6678
6679  // Setup the object header.
6680  __ LoadRoot(r2, Heap::kContextMapRootIndex);
6681  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
6682  __ mov(r2, Operand(Smi::FromInt(length)));
6683  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
6684
6685  // Setup the fixed slots.
6686  __ mov(r1, Operand(Smi::FromInt(0)));
6687  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
6688  __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
6689  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
6690  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
6691
6692  // Copy the global object from the surrounding context.
6693  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
6694  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
6695
6696  // Initialize the rest of the slots to undefined.
6697  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
6698  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
6699    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
6700  }
6701
6702  // Remove the on-stack argument and return.
6703  __ mov(cp, r0);
6704  __ pop();
6705  __ Ret();
6706
6707  // Need to collect. Call into runtime system.
6708  __ bind(&gc);
6709  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
6710}
6711
6712
6713void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
6714  // Stack layout on entry:
6715  //
6716  // [sp]: constant elements.
6717  // [sp + kPointerSize]: literal index.
6718  // [sp + (2 * kPointerSize)]: literals array.
6719
6720  // All sizes here are multiples of kPointerSize.
6721  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
6722  int size = JSArray::kSize + elements_size;
6723
6724  // Load boilerplate object into r3 and check if we need to create a
6725  // boilerplate.
6726  Label slow_case;
6727  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
6728  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
6729  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
6730  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
6731  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
6732  __ cmp(r3, ip);
6733  __ b(eq, &slow_case);
6734
6735  // Allocate both the JS array and the elements array in one big
6736  // allocation. This avoids multiple limit checks.
6737  __ AllocateInNewSpace(size,
6738                        r0,
6739                        r1,
6740                        r2,
6741                        &slow_case,
6742                        TAG_OBJECT);
6743
6744  // Copy the JS array part.
6745  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
6746    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
6747      __ ldr(r1, FieldMemOperand(r3, i));
6748      __ str(r1, FieldMemOperand(r0, i));
6749    }
6750  }
6751
6752  if (length_ > 0) {
6753    // Get hold of the elements array of the boilerplate and setup the
6754    // elements pointer in the resulting object.
6755    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
6756    __ add(r2, r0, Operand(JSArray::kSize));
6757    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
6758
6759    // Copy the elements array.
6760    for (int i = 0; i < elements_size; i += kPointerSize) {
6761      __ ldr(r1, FieldMemOperand(r3, i));
6762      __ str(r1, FieldMemOperand(r2, i));
6763    }
6764  }
6765
6766  // Return and remove the on-stack parameters.
6767  __ add(sp, sp, Operand(3 * kPointerSize));
6768  __ Ret();
6769
6770  __ bind(&slow_case);
6771  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
6772}
6773
6774
6775// Takes a Smi and converts to an IEEE 64 bit floating point value in two
6776// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
6777// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
6778// scratch register.  Destroys the source register.  No GC occurs during this
6779// stub so you don't have to set up the frame.
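// As a worked example, the Smi 5 converts as follows: the untagged value 5
// has 29 leading zeros, so the biased exponent is 31 + 1023 - 29 = 1025 and
// the top mantissa bits come from 5 shifted left by 30 (chopping off the
// implicit leading 1).  That gives the exponent word 0x40140000 and mantissa
// word 0, the IEEE 754 representation of 5.0.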
6780class ConvertToDoubleStub : public CodeStub {
6781 public:
6782  ConvertToDoubleStub(Register result_reg_1,
6783                      Register result_reg_2,
6784                      Register source_reg,
6785                      Register scratch_reg)
6786      : result1_(result_reg_1),
6787        result2_(result_reg_2),
6788        source_(source_reg),
6789        zeros_(scratch_reg) { }
6790
6791 private:
6792  Register result1_;
6793  Register result2_;
6794  Register source_;
6795  Register zeros_;
6796
6797  // Minor key encoding in 16 bits.
6798  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
6799  class OpBits: public BitField<Token::Value, 2, 14> {};
6800
6801  Major MajorKey() { return ConvertToDouble; }
6802  int MinorKey() {
6803    // Encode the parameters in a unique 16 bit value.
6804    return  result1_.code() +
6805           (result2_.code() << 4) +
6806           (source_.code() << 8) +
6807           (zeros_.code() << 12);
6808  }
6809
6810  void Generate(MacroAssembler* masm);
6811
6812  const char* GetName() { return "ConvertToDoubleStub"; }
6813
6814#ifdef DEBUG
6815  void Print() { PrintF("ConvertToDoubleStub\n"); }
6816#endif
6817};
6818
6819
6820void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
6821#ifndef BIG_ENDIAN_FLOATING_POINT
6822  Register exponent = result1_;
6823  Register mantissa = result2_;
6824#else
6825  Register exponent = result2_;
6826  Register mantissa = result1_;
6827#endif
6828  Label not_special;
6829  // Convert from Smi to integer.
6830  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
6831  // Move sign bit from source to destination.  This works because the sign bit
6832  // in the exponent word of the double has the same position and polarity as
6833  // the 2's complement sign bit in a Smi.
6834  ASSERT(HeapNumber::kSignMask == 0x80000000u);
6835  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
6836  // Subtract from 0 if source was negative.
6837  __ rsb(source_, source_, Operand(0), LeaveCC, ne);
6838
6839  // We have -1, 0 or 1, which we treat specially. Register source_ contains
6840  // absolute value: it is either equal to 1 (special case of -1 and 1),
6841  // greater than 1 (not a special case) or less than 1 (special case of 0).
6842  __ cmp(source_, Operand(1));
6843  __ b(gt, &not_special);
6844
6845  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
6846  static const uint32_t exponent_word_for_1 =
6847      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
6848  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
6849  // 1, 0 and -1 all have 0 for the second word.
6850  __ mov(mantissa, Operand(0));
6851  __ Ret();
6852
6853  __ bind(&not_special);
6854  // Count leading zeros.  Uses mantissa for a scratch register on pre-ARM5.
6855  // Gets the wrong answer for 0, but we already checked for that case above.
6856  __ CountLeadingZeros(zeros_, source_, mantissa);
6857  // Compute exponent and or it into the exponent register.
6858  // We use mantissa as a scratch register here.  Use a fudge factor to
6859  // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
6860  // that fit in the ARM's constant field.
6861  int fudge = 0x400;
6862  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
6863  __ add(mantissa, mantissa, Operand(fudge));
6864  __ orr(exponent,
6865         exponent,
6866         Operand(mantissa, LSL, HeapNumber::kExponentShift));
6867  // Shift up the source chopping the top bit off.
6868  __ add(zeros_, zeros_, Operand(1));
6869  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
6870  __ mov(source_, Operand(source_, LSL, zeros_));
6871  // Compute lower part of fraction (last 12 bits).
6872  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
6873  // And the top (top 20 bits).
6874  __ orr(exponent,
6875         exponent,
6876         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
6877  __ Ret();
6878}
6879
6880
6881// See comment for class.
6882void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
6883  Label max_negative_int;
6884  // the_int_ has the answer which is a signed int32 but not a Smi.
6885  // We test for the special value that has a different exponent.  This test
6886  // has the neat side effect of setting the flags according to the sign.
6887  ASSERT(HeapNumber::kSignMask == 0x80000000u);
6888  __ cmp(the_int_, Operand(0x80000000u));
6889  __ b(eq, &max_negative_int);
  // Set up the correct exponent in scratch_.  All non-Smi int32s have the
  // same exponent: a non-Smi integer is 1.xxx * 2^30, so the exponent is 30
  // (stored with the bias added).
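  // (Smis hold 31-bit signed values, so a non-Smi int32 has an absolute
  // value of at least 2^30, putting its topmost set bit at position 30; the
  // one exception, -2^31, was handled separately above.)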
6892  uint32_t non_smi_exponent =
6893      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
6894  __ mov(scratch_, Operand(non_smi_exponent));
6895  // Set the sign bit in scratch_ if the value was negative.
6896  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
6897  // Subtract from 0 if the value was negative.
6898  __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
  // We should be masking the implicit first digit of the mantissa away here,
6900  // but it just ends up combining harmlessly with the last digit of the
6901  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
6902  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
6903  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
6904  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
6905  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
6906  __ str(scratch_, FieldMemOperand(the_heap_number_,
6907                                   HeapNumber::kExponentOffset));
6908  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
6909  __ str(scratch_, FieldMemOperand(the_heap_number_,
6910                                   HeapNumber::kMantissaOffset));
6911  __ Ret();
6912
6913  __ bind(&max_negative_int);
6914  // The max negative int32 is stored as a positive number in the mantissa of
6915  // a double because it uses a sign bit instead of using two's complement.
6916  // The actual mantissa bits stored are all 0 because the implicit most
6917  // significant 1 bit is not stored.
6918  non_smi_exponent += 1 << HeapNumber::kExponentShift;
6919  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
6920  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
6921  __ mov(ip, Operand(0));
6922  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
6923  __ Ret();
6924}
6925
6926
6927// Handle the case where the lhs and rhs are the same object.
6928// Equality is almost reflexive (everything but NaN), so this is a test
6929// for "identity and not NaN".
6930static void EmitIdenticalObjectComparison(MacroAssembler* masm,
6931                                          Label* slow,
6932                                          Condition cc,
6933                                          bool never_nan_nan) {
6934  Label not_identical;
6935  Label heap_number, return_equal;
6936  __ cmp(r0, r1);
6937  __ b(ne, &not_identical);
6938
6939  // The two objects are identical.  If we know that one of them isn't NaN then
6940  // we now know they test equal.
6941  if (cc != eq || !never_nan_nan) {
6942    // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
6943    // so we do the second best thing - test it ourselves.
6944    // They are both equal and they are not both Smis so both of them are not
6945    // Smis.  If it's not a heap number, then return equal.
6946    if (cc == lt || cc == gt) {
6947      __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
6948      __ b(ge, slow);
6949    } else {
6950      __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
6951      __ b(eq, &heap_number);
6952      // Comparing JS objects with <=, >= is complicated.
6953      if (cc != eq) {
6954        __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
6955        __ b(ge, slow);
6956        // Normally here we fall through to return_equal, but undefined is
6957        // special: (undefined == undefined) == true, but
6958        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
6959        if (cc == le || cc == ge) {
6960          __ cmp(r4, Operand(ODDBALL_TYPE));
6961          __ b(ne, &return_equal);
6962          __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
6963          __ cmp(r0, r2);
6964          __ b(ne, &return_equal);
6965          if (cc == le) {
6966            // undefined <= undefined should fail.
6967            __ mov(r0, Operand(GREATER));
          } else {
6969            // undefined >= undefined should fail.
6970            __ mov(r0, Operand(LESS));
6971          }
6972          __ mov(pc, Operand(lr));       // Return.
6973        }
6974      }
6975    }
6976  }
6977
6978  __ bind(&return_equal);
6979  if (cc == lt) {
6980    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
6981  } else if (cc == gt) {
6982    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
6983  } else {
6984    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
6985  }
6986  __ mov(pc, Operand(lr));  // Return.
6987
6988  if (cc != eq || !never_nan_nan) {
6989    // For less and greater we don't have to check for NaN since the result of
6990    // x < x is false regardless.  For the others here is some code to check
6991    // for NaN.
6992    if (cc != lt && cc != gt) {
6993      __ bind(&heap_number);
6994      // It is a heap number, so return non-equal if it's NaN and equal if it's
6995      // not NaN.
6996
6997      // The representation of NaN values has all exponent bits (52..62) set,
6998      // and not all mantissa bits (0..51) clear.
6999      // Read top bits of double representation (second word of value).
7000      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
7001      // Test that exponent bits are all set.
7002      __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
7003      // NaNs have all-one exponents so they sign extend to -1.
7004      __ cmp(r3, Operand(-1));
7005      __ b(ne, &return_equal);
7006
7007      // Shift out flag and all exponent bits, retaining only mantissa.
7008      __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
7009      // Or with all low-bits of mantissa.
7010      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
7011      __ orr(r0, r3, Operand(r2), SetCC);
7012      // For equal we already have the right value in r0:  Return zero (equal)
7013      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
7014      // not (it's a NaN).  For <= and >= we need to load r0 with the failing
7015      // value if it's a NaN.
7016      if (cc != eq) {
7017        // All-zero means Infinity means equal.
7018        __ mov(pc, Operand(lr), LeaveCC, eq);  // Return equal
7019        if (cc == le) {
7020          __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
7021        } else {
7022          __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
7023        }
7024      }
7025      __ mov(pc, Operand(lr));  // Return.
7026    }
7027    // No fall through here.
7028  }
7029
7030  __ bind(&not_identical);
7031}
7032
7033
7034// See comment at call site.
7035static void EmitSmiNonsmiComparison(MacroAssembler* masm,
7036                                    Label* lhs_not_nan,
7037                                    Label* slow,
7038                                    bool strict) {
7039  Label rhs_is_smi;
7040  __ tst(r0, Operand(kSmiTagMask));
7041  __ b(eq, &rhs_is_smi);
7042
7043  // Lhs is a Smi.  Check whether the rhs is a heap number.
7044  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
7045  if (strict) {
7046    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed.  Return non-equal (r0 is already not zero).
7048    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
7049  } else {
7050    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
7051    // the runtime.
7052    __ b(ne, slow);
7053  }
7054
7055  // Lhs (r1) is a smi, rhs (r0) is a number.
7056  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert lhs to a double in d7.
7058    CpuFeatures::Scope scope(VFP3);
7059    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
7060    __ vmov(s15, r7);
7061    __ vcvt_f64_s32(d7, s15);
7062    // Load the double from rhs, tagged HeapNumber r0, to d6.
7063    __ sub(r7, r0, Operand(kHeapObjectTag));
7064    __ vldr(d6, r7, HeapNumber::kValueOffset);
7065  } else {
7066    __ push(lr);
7067    // Convert lhs to a double in r2, r3.
7068    __ mov(r7, Operand(r1));
7069    ConvertToDoubleStub stub1(r3, r2, r7, r6);
7070    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
7071    // Load rhs to a double in r0, r1.
7072    __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
7073    __ pop(lr);
7074  }
7075
7076  // We now have both loaded as doubles but we can skip the lhs nan check
7077  // since it's a smi.
7078  __ jmp(lhs_not_nan);
7079
7080  __ bind(&rhs_is_smi);
7081  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
7082  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
7083  if (strict) {
7084    // If lhs is not a number and rhs is a smi then strict equality cannot
7085    // succeed.  Return non-equal.
7086    __ mov(r0, Operand(1), LeaveCC, ne);  // Non-zero indicates not equal.
7087    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
7088  } else {
7089    // Smi compared non-strictly with a non-smi non-heap-number.  Call
7090    // the runtime.
7091    __ b(ne, slow);
7092  }
7093
7094  // Rhs (r0) is a smi, lhs (r1) is a heap number.
7095  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert rhs to a double in d6.
7097    CpuFeatures::Scope scope(VFP3);
7098    // Load the double from lhs, tagged HeapNumber r1, to d7.
7099    __ sub(r7, r1, Operand(kHeapObjectTag));
7100    __ vldr(d7, r7, HeapNumber::kValueOffset);
7101    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
7102    __ vmov(s13, r7);
7103    __ vcvt_f64_s32(d6, s13);
7104  } else {
7105    __ push(lr);
7106    // Load lhs to a double in r2, r3.
7107    __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
7108    // Convert rhs to a double in r0, r1.
7109    __ mov(r7, Operand(r0));
7110    ConvertToDoubleStub stub2(r1, r0, r7, r6);
7111    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
7112    __ pop(lr);
7113  }
7114  // Fall through to both_loaded_as_doubles.
7115}
7116
7117
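// Checks whether either of the two doubles (in r0/r1 and r2/r3) is a NaN.
// If one is, loads r0 with the failing comparison result and returns;
// otherwise falls through.  Also binds lhs_not_nan.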
7118void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
7119  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
7120  Register rhs_exponent = exp_first ? r0 : r1;
7121  Register lhs_exponent = exp_first ? r2 : r3;
7122  Register rhs_mantissa = exp_first ? r1 : r0;
7123  Register lhs_mantissa = exp_first ? r3 : r2;
7124  Label one_is_nan, neither_is_nan;
7125
7126  __ Sbfx(r4,
7127          lhs_exponent,
7128          HeapNumber::kExponentShift,
7129          HeapNumber::kExponentBits);
7130  // NaNs have all-one exponents so they sign extend to -1.
7131  __ cmp(r4, Operand(-1));
7132  __ b(ne, lhs_not_nan);
7133  __ mov(r4,
7134         Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
7135         SetCC);
7136  __ b(ne, &one_is_nan);
7137  __ cmp(lhs_mantissa, Operand(0));
7138  __ b(ne, &one_is_nan);
7139
7140  __ bind(lhs_not_nan);
7141  __ Sbfx(r4,
7142          rhs_exponent,
7143          HeapNumber::kExponentShift,
7144          HeapNumber::kExponentBits);
7145  // NaNs have all-one exponents so they sign extend to -1.
7146  __ cmp(r4, Operand(-1));
7147  __ b(ne, &neither_is_nan);
7148  __ mov(r4,
7149         Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
7150         SetCC);
7151  __ b(ne, &one_is_nan);
7152  __ cmp(rhs_mantissa, Operand(0));
7153  __ b(eq, &neither_is_nan);
7154
7155  __ bind(&one_is_nan);
7156  // NaN comparisons always fail.
7157  // Load whatever we need in r0 to make the comparison fail.
7158  if (cc == lt || cc == le) {
7159    __ mov(r0, Operand(GREATER));
7160  } else {
7161    __ mov(r0, Operand(LESS));
7162  }
7163  __ mov(pc, Operand(lr));  // Return.
7164
7165  __ bind(&neither_is_nan);
7166}
7167
7168
7169// See comment at call site.
7170static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
7171  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
7172  Register rhs_exponent = exp_first ? r0 : r1;
7173  Register lhs_exponent = exp_first ? r2 : r3;
7174  Register rhs_mantissa = exp_first ? r1 : r0;
7175  Register lhs_mantissa = exp_first ? r3 : r2;
7176
7177  // r0, r1, r2, r3 have the two doubles.  Neither is a NaN.
7178  if (cc == eq) {
7179    // Doubles are not equal unless they have the same bit pattern.
7180    // Exception: 0 and -0.
7181    __ cmp(rhs_mantissa, Operand(lhs_mantissa));
7182    __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
7183    // Return non-zero if the numbers are unequal.
7184    __ mov(pc, Operand(lr), LeaveCC, ne);
7185
7186    __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
7187    // If exponents are equal then return 0.
7188    __ mov(pc, Operand(lr), LeaveCC, eq);
7189
7190    // Exponents are unequal.  The only way we can return that the numbers
7191    // are equal is if one is -0 and the other is 0.  We already dealt
7192    // with the case where both are -0 or both are 0.
    // We start by seeing if the mantissas (that are equal) or the bottom
    // 31 bits of the lhs exponent are non-zero.  If so we return not
    // equal.
7196    __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
7197    __ mov(r0, Operand(r4), LeaveCC, ne);
7198    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return conditionally.
    // Now they are equal if and only if the rhs exponent is zero in its
    // low 31 bits.
7201    __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
7202    __ mov(pc, Operand(lr));
7203  } else {
7204    // Call a native function to do a comparison between two non-NaNs.
7205    // Call C routine that may not cause GC or other trouble.
7206    __ push(lr);
7207    __ PrepareCallCFunction(4, r5);  // Two doubles count as 4 arguments.
7208    __ CallCFunction(ExternalReference::compare_doubles(), 4);
7209    __ pop(pc);  // Return.
7210  }
7211}
7212
7213
7214// See comment at call site.
7215static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
  // If either operand is a JSObject or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_JS_OBJECT_TYPE.
  __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero).
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ mov(pc, Operand(lr));  // Return.

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for symbol-symbol.
  // Ensure that no non-strings have the symbol bit set.
  ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
  ASSERT(kSymbolTag != 0);
  __ and_(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(ne, &return_not_equal);
7250}
7251
7252
7253// See comment at call site.
7254static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
7255                                       Label* both_loaded_as_doubles,
7256                                       Label* not_heap_numbers,
7257                                       Label* slow) {
7258  __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
7259  __ b(ne, not_heap_numbers);
7260  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
7261  __ cmp(r2, r3);
7262  __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.
7263
7264  // Both are heap numbers.  Load them up then jump to the code we have
7265  // for that.
7266  if (CpuFeatures::IsSupported(VFP3)) {
7267    CpuFeatures::Scope scope(VFP3);
7268    __ sub(r7, r0, Operand(kHeapObjectTag));
7269    __ vldr(d6, r7, HeapNumber::kValueOffset);
7270    __ sub(r7, r1, Operand(kHeapObjectTag));
7271    __ vldr(d7, r7, HeapNumber::kValueOffset);
7272  } else {
7273    __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
7274    __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
7275  }
7276  __ jmp(both_loaded_as_doubles);
7277}
7278
7279
7280// Fast negative check for symbol-to-symbol equality.
7281static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
7282                                         Label* possible_strings,
7283                                         Label* not_both_strings) {
7284  // r2 is object type of r0.
7285  // Ensure that no non-strings have the symbol bit set.
7286  Label object_test;
7287  ASSERT(kSymbolTag != 0);
7288  __ tst(r2, Operand(kIsNotStringMask));
7289  __ b(ne, &object_test);
7290  __ tst(r2, Operand(kIsSymbolMask));
7291  __ b(eq, possible_strings);
7292  __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
7293  __ b(ge, not_both_strings);
7294  __ tst(r3, Operand(kIsSymbolMask));
7295  __ b(eq, possible_strings);
7296
7297  // Both are symbols.  We already checked they weren't the same pointer
7298  // so they are not equal.
7299  __ mov(r0, Operand(1));   // Non-zero indicates not equal.
7300  __ mov(pc, Operand(lr));  // Return.
7301
7302  __ bind(&object_test);
7303  __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
7304  __ b(lt, not_both_strings);
7305  __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
7306  __ b(lt, not_both_strings);
7307  // If both objects are undetectable, they are equal.  Otherwise, they
7308  // are not equal, since they are different objects and an object is not
7309  // equal to undefined.
7310  __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
7311  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
7312  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
7313  __ and_(r0, r2, Operand(r3));
7314  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
7315  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
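  // r0 is now zero only if both objects were undetectable, which is the
  // only case in which they compare equal.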
7316  __ mov(pc, Operand(lr));  // Return.
7317}
7318
7319
7320void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
7321                                                         Register object,
7322                                                         Register result,
7323                                                         Register scratch1,
7324                                                         Register scratch2,
7325                                                         Register scratch3,
7326                                                         bool object_is_smi,
7327                                                         Label* not_found) {
7328  // Use of registers. Register result is used as a temporary.
7329  Register number_string_cache = result;
7330  Register mask = scratch3;
7331
7332  // Load the number string cache.
7333  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
7334
7335  // Make the hash mask from the length of the number string cache. It
7336  // contains two elements (number and string) for each cache entry.
7337  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
7338  // Divide length by two (length is a smi).
7339  __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
7340  __ sub(mask, mask, Operand(1));  // Make mask.
7341
7342  // Calculate the entry in the number string cache. The hash value in the
7343  // number string cache for smis is just the smi value, and the hash for
7344  // doubles is the xor of the upper and lower words. See
7345  // Heap::GetNumberStringCache.
7346  Label is_smi;
7347  Label load_result_from_cache;
7348  if (!object_is_smi) {
7349    __ BranchOnSmi(object, &is_smi);
7350    if (CpuFeatures::IsSupported(VFP3)) {
7351      CpuFeatures::Scope scope(VFP3);
7352      __ CheckMap(object,
7353                  scratch1,
7354                  Heap::kHeapNumberMapRootIndex,
7355                  not_found,
7356                  true);
7357
7358      ASSERT_EQ(8, kDoubleSize);
7359      __ add(scratch1,
7360             object,
7361             Operand(HeapNumber::kValueOffset - kHeapObjectTag));
7362      __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
7363      __ eor(scratch1, scratch1, Operand(scratch2));
7364      __ and_(scratch1, scratch1, Operand(mask));
7365
7366      // Calculate address of entry in string cache: each entry consists
7367      // of two pointer sized fields.
7368      __ add(scratch1,
7369             number_string_cache,
7370             Operand(scratch1, LSL, kPointerSizeLog2 + 1));
7371
7372      Register probe = mask;
7373      __ ldr(probe,
7374             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
7375      __ BranchOnSmi(probe, not_found);
7376      __ sub(scratch2, object, Operand(kHeapObjectTag));
7377      __ vldr(d0, scratch2, HeapNumber::kValueOffset);
7378      __ sub(probe, probe, Operand(kHeapObjectTag));
7379      __ vldr(d1, probe, HeapNumber::kValueOffset);
7380      __ vcmp(d0, d1);
7381      __ vmrs(pc);
7382      __ b(ne, not_found);  // The cache did not contain this value.
7383      __ b(&load_result_from_cache);
7384    } else {
7385      __ b(not_found);
7386    }
7387  }
7388
7389  __ bind(&is_smi);
7390  Register scratch = scratch1;
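  // The hash of a smi is the smi value itself, so shift away the tag bit
  // and mask the result to get the cache index.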
7391  __ and_(scratch, mask, Operand(object, ASR, 1));
7392  // Calculate address of entry in string cache: each entry consists
7393  // of two pointer sized fields.
7394  __ add(scratch,
7395         number_string_cache,
7396         Operand(scratch, LSL, kPointerSizeLog2 + 1));
7397
7398  // Check if the entry is the smi we are looking for.
7399  Register probe = mask;
7400  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
7401  __ cmp(object, probe);
7402  __ b(ne, not_found);
7403
7404  // Get the result from the cache.
7405  __ bind(&load_result_from_cache);
7406  __ ldr(result,
7407         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
7408  __ IncrementCounter(&Counters::number_to_string_native,
7409                      1,
7410                      scratch1,
7411                      scratch2);
7412}
7413
7414
7415void NumberToStringStub::Generate(MacroAssembler* masm) {
7416  Label runtime;
7417
7418  __ ldr(r1, MemOperand(sp, 0));
7419
7420  // Generate code to lookup number in the number string cache.
7421  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
7422  __ add(sp, sp, Operand(1 * kPointerSize));
7423  __ Ret();
7424
7425  __ bind(&runtime);
7426  // Handle number to string in the runtime system if not found in the cache.
7427  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
7428}
7429
7430
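// Computes the address of the field that was written to and records the
// write for the garbage collector.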
7431void RecordWriteStub::Generate(MacroAssembler* masm) {
7432  __ add(offset_, object_, Operand(offset_));
7433  __ RecordWriteHelper(object_, offset_, scratch_);
7434  __ Ret();
7435}
7436
7437
7438// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
7439// On exit r0 is 0, positive or negative to indicate the result of
7440// the comparison.
7441void CompareStub::Generate(MacroAssembler* masm) {
7442  Label slow;  // Call builtin.
7443  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
7444
7445  // NOTICE! This code is only reached after a smi-fast-case check, so
7446  // it is certain that at least one operand isn't a smi.
7447
7448  // Handle the case where the objects are identical.  Either returns the answer
7449  // or goes to slow.  Only falls through if the objects were not identical.
7450  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
7451
7452  // If either is a Smi (we know that not both are), then they can only
7453  // be strictly equal if the other is a HeapNumber.
7454  ASSERT_EQ(0, kSmiTag);
7455  ASSERT_EQ(0, Smi::FromInt(0));
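  // The smi tag is zero, so the low bit of r0 & r1 is set only if both
  // operands are heap objects.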
7456  __ and_(r2, r0, Operand(r1));
7457  __ tst(r2, Operand(kSmiTagMask));
7458  __ b(ne, &not_smis);
7459  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
7460  // 1) Return the answer.
7461  // 2) Go to slow.
7462  // 3) Fall through to both_loaded_as_doubles.
7463  // 4) Jump to lhs_not_nan.
7464  // In cases 3 and 4 we have found out we were dealing with a number-number
7465  // comparison.  If VFP3 is supported the double values of the numbers have
7466  // been loaded into d7 and d6.  Otherwise, the double values have been loaded
7467  // into r0, r1, r2, and r3.
7468  EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
7469
7470  __ bind(&both_loaded_as_doubles);
7471  // The arguments have been converted to doubles and stored in d6 and d7, if
7472  // VFP3 is supported, or in r0, r1, r2, and r3.
7473  if (CpuFeatures::IsSupported(VFP3)) {
7474    __ bind(&lhs_not_nan);
7475    CpuFeatures::Scope scope(VFP3);
7476    Label no_nan;
7477    // ARMv7 VFP3 instructions to implement double precision comparison.
7478    __ vcmp(d7, d6);
7479    __ vmrs(pc);  // Move vector status bits to normal status bits.
7480    Label nan;
7481    __ b(vs, &nan);
7482    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
7483    __ mov(r0, Operand(LESS), LeaveCC, lt);
7484    __ mov(r0, Operand(GREATER), LeaveCC, gt);
7485    __ mov(pc, Operand(lr));
7486
7487    __ bind(&nan);
7488    // If one of the sides was a NaN then the v flag is set.  Load r0 with
7489    // whatever it takes to make the comparison fail, since comparisons with NaN
7490    // always fail.
7491    if (cc_ == lt || cc_ == le) {
7492      __ mov(r0, Operand(GREATER));
7493    } else {
7494      __ mov(r0, Operand(LESS));
7495    }
7496    __ mov(pc, Operand(lr));
7497  } else {
7498    // Checks for NaN in the doubles we have loaded.  Can return the answer or
7499    // fall through if neither is a NaN.  Also binds lhs_not_nan.
7500    EmitNanCheck(masm, &lhs_not_nan, cc_);
7501    // Compares two doubles in r0, r1, r2, r3 that are not NaNs.  Returns the
7502    // answer.  Never falls through.
7503    EmitTwoNonNanDoubleComparison(masm, cc_);
7504  }
7505
7506  __ bind(&not_smis);
7507  // At this point we know we are dealing with two different objects,
7508  // and neither of them is a Smi.  The objects are in r0 and r1.
7509  if (strict_) {
7510    // This returns non-equal for some object types, or falls through if it
7511    // was not lucky.
7512    EmitStrictTwoHeapObjectCompare(masm);
7513  }
7514
7515  Label check_for_symbols;
7516  Label flat_string_check;
7517  // Check for heap-number-heap-number comparison.  Can jump to slow case,
7518  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
7519  // that case.  If the inputs are not doubles then jumps to check_for_symbols.
7520  // In this case r2 will contain the type of r0.  Never falls through.
7521  EmitCheckForTwoHeapNumbers(masm,
7522                             &both_loaded_as_doubles,
7523                             &check_for_symbols,
7524                             &flat_string_check);
7525
7526  __ bind(&check_for_symbols);
7527  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
7528  // symbols.
7529  if (cc_ == eq && !strict_) {
7530    // Returns an answer for two symbols or two detectable objects.
7531    // Otherwise jumps to string case or not both strings case.
7532    // Assumes that r2 is the type of r0 on entry.
7533    EmitCheckForSymbolsOrObjects(masm, &flat_string_check, &slow);
7534  }
7535
7536  // Check for both being sequential ASCII strings, and inline if that is the
7537  // case.
7538  __ bind(&flat_string_check);
7539
7540  __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
7541
7542  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
7543  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
7544                                                     r1,
7545                                                     r0,
7546                                                     r2,
7547                                                     r3,
7548                                                     r4,
7549                                                     r5);
7550  // Never falls through to here.
7551
7552  __ bind(&slow);
7553
7554  __ Push(r1, r0);
7555  // Figure out which native to call and set up the arguments.
7556  Builtins::JavaScript native;
7557  if (cc_ == eq) {
7558    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
7559  } else {
7560    native = Builtins::COMPARE;
7561    int ncr;  // NaN compare result
7562    if (cc_ == lt || cc_ == le) {
7563      ncr = GREATER;
7564    } else {
7565      ASSERT(cc_ == gt || cc_ == ge);  // remaining cases
7566      ncr = LESS;
7567    }
7568    __ mov(r0, Operand(Smi::FromInt(ncr)));
7569    __ push(r0);
7570  }
7571
7572  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
7573  // tagged as a small integer.
7574  __ InvokeBuiltin(native, JUMP_JS);
7575}
7576
7577
7578// We fall into this code if the operands were Smis, but the result was
7579// not (e.g. overflow).  We branch into this code (to the not_smi label) if
7580// the operands were not both Smi.  The operands are in r0 and r1.  In order
7581// to call the C-implemented binary fp operation routines we need to end up
7582// with the double precision floating point operands in r0 and r1 (for the
7583// value in r1) and r2 and r3 (for the value in r0).
7584void GenericBinaryOpStub::HandleBinaryOpSlowCases(
7585    MacroAssembler* masm,
7586    Label* not_smi,
7587    Register lhs,
7588    Register rhs,
7589    const Builtins::JavaScript& builtin) {
7590  Label slow, slow_reverse, do_the_call;
7591  bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
7592
7593  ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
7594  Register heap_number_map = r6;
7595
7596  if (ShouldGenerateSmiCode()) {
7597    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7598
7599    // Smi-smi case (overflow).
7600    // Since both are Smis there is no heap number to overwrite, so allocate.
7601    // The new heap number is in r5.  r3 and r7 are scratch.
7602    __ AllocateHeapNumber(
7603        r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
7604
7605    // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
7606    // using registers d7 and d6 for the double values.
7607    if (CpuFeatures::IsSupported(VFP3)) {
7608      CpuFeatures::Scope scope(VFP3);
7609      __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
7610      __ vmov(s15, r7);
7611      __ vcvt_f64_s32(d7, s15);
7612      __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
7613      __ vmov(s13, r7);
7614      __ vcvt_f64_s32(d6, s13);
7615      if (!use_fp_registers) {
7616        __ vmov(r2, r3, d7);
7617        __ vmov(r0, r1, d6);
7618      }
7619    } else {
7620      // Write Smi from rhs to r3 and r2 in double format.  r9 is scratch.
7621      __ mov(r7, Operand(rhs));
7622      ConvertToDoubleStub stub1(r3, r2, r7, r9);
7623      __ push(lr);
7624      __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
7625      // Write Smi from lhs to r1 and r0 in double format.  r9 is scratch.
7626      __ mov(r7, Operand(lhs));
7627      ConvertToDoubleStub stub2(r1, r0, r7, r9);
7628      __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
7629      __ pop(lr);
7630    }
7631    __ jmp(&do_the_call);  // Tail call.  No return.
7632  }
7633
7634  // We branch here if at least one of r0 and r1 is not a Smi.
7635  __ bind(not_smi);
7636  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7637
7638  // After this point we have the left hand side in r1 and the right hand side
7639  // in r0.
7640  if (lhs.is(r0)) {
7641    __ Swap(r0, r1, ip);
7642  }
7643
7644  if (ShouldGenerateFPCode()) {
7645    Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
7646
7647    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
7648      switch (op_) {
7649        case Token::ADD:
7650        case Token::SUB:
7651        case Token::MUL:
7652        case Token::DIV:
7653          GenerateTypeTransition(masm);
7654          break;
7655
7656        default:
7657          break;
7658      }
7659      // Restore heap number map register.
7660      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7661    }
7662
7663    if (mode_ == NO_OVERWRITE) {
7664      // In the case where there is no chance of an overwritable float we may as
7665      // well do the allocation immediately while r0 and r1 are untouched.
7666      __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
7667    }
7668
7669    // Move r0 to a double in r2-r3.
7670    __ tst(r0, Operand(kSmiTagMask));
7671    __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
7672    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
7673    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7674    __ cmp(r4, heap_number_map);
7675    __ b(ne, &slow);
7676    if (mode_ == OVERWRITE_RIGHT) {
7677      __ mov(r5, Operand(r0));  // Overwrite this heap number.
7678    }
7679    if (use_fp_registers) {
7680      CpuFeatures::Scope scope(VFP3);
7681      // Load the double from tagged HeapNumber r0 to d7.
7682      __ sub(r7, r0, Operand(kHeapObjectTag));
7683      __ vldr(d7, r7, HeapNumber::kValueOffset);
7684    } else {
7685      // Calling convention says that second double is in r2 and r3.
7686      __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
7687    }
7688    __ jmp(&finished_loading_r0);
7689    __ bind(&r0_is_smi);
7690    if (mode_ == OVERWRITE_RIGHT) {
7691      // We can't overwrite a Smi so get address of new heap number into r5.
7692      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
7693    }
7694
7695    if (CpuFeatures::IsSupported(VFP3)) {
7696      CpuFeatures::Scope scope(VFP3);
7697      // Convert smi in r0 to double in d7.
7698      __ mov(r7, Operand(r0, ASR, kSmiTagSize));
7699      __ vmov(s15, r7);
7700      __ vcvt_f64_s32(d7, s15);
7701      if (!use_fp_registers) {
7702        __ vmov(r2, r3, d7);
7703      }
7704    } else {
7705      // Write Smi from r0 to r3 and r2 in double format.
7706      __ mov(r7, Operand(r0));
7707      ConvertToDoubleStub stub3(r3, r2, r7, r4);
7708      __ push(lr);
7709      __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
7710      __ pop(lr);
7711    }
7712
7713    // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
7714    // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
7715    Label r1_is_not_smi;
7716    if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
7717      __ tst(r1, Operand(kSmiTagMask));
7718      __ b(ne, &r1_is_not_smi);
7719      GenerateTypeTransition(masm);
7720      // Restore heap number map register.
7721      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7722      __ jmp(&r1_is_smi);
7723    }
7724
7725    __ bind(&finished_loading_r0);
7726
7727    // Move r1 to a double in r0-r1.
7728    __ tst(r1, Operand(kSmiTagMask));
7729    __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
7730    __ bind(&r1_is_not_smi);
7731    __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
7732    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7733    __ cmp(r4, heap_number_map);
7734    __ b(ne, &slow);
7735    if (mode_ == OVERWRITE_LEFT) {
7736      __ mov(r5, Operand(r1));  // Overwrite this heap number.
7737    }
7738    if (use_fp_registers) {
7739      CpuFeatures::Scope scope(VFP3);
7740      // Load the double from tagged HeapNumber r1 to d6.
7741      __ sub(r7, r1, Operand(kHeapObjectTag));
7742      __ vldr(d6, r7, HeapNumber::kValueOffset);
7743    } else {
7744      // Calling convention says that first double is in r0 and r1.
7745      __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
7746    }
7747    __ jmp(&finished_loading_r1);
7748    __ bind(&r1_is_smi);
7749    if (mode_ == OVERWRITE_LEFT) {
7750      // We can't overwrite a Smi so get address of new heap number into r5.
7751      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
7752    }
7753
7754    if (CpuFeatures::IsSupported(VFP3)) {
7755      CpuFeatures::Scope scope(VFP3);
7756      // Convert smi in r1 to double in d6.
7757      __ mov(r7, Operand(r1, ASR, kSmiTagSize));
7758      __ vmov(s13, r7);
7759      __ vcvt_f64_s32(d6, s13);
7760      if (!use_fp_registers) {
7761        __ vmov(r0, r1, d6);
7762      }
7763    } else {
7764      // Write Smi from r1 to r1 and r0 in double format.
7765      __ mov(r7, Operand(r1));
7766      ConvertToDoubleStub stub4(r1, r0, r7, r9);
7767      __ push(lr);
7768      __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
7769      __ pop(lr);
7770    }
7771
7772    __ bind(&finished_loading_r1);
7773
7774    __ bind(&do_the_call);
7775    // If we are inlining the operation using VFP3 instructions for
7776    // add, subtract, multiply, or divide, the arguments are in d6 and d7.
7777    if (use_fp_registers) {
7778      CpuFeatures::Scope scope(VFP3);
7779      // ARMv7 VFP3 instructions to implement
7780      // double precision, add, subtract, multiply, divide.
7781
7782      if (Token::MUL == op_) {
7783        __ vmul(d5, d6, d7);
7784      } else if (Token::DIV == op_) {
7785        __ vdiv(d5, d6, d7);
7786      } else if (Token::ADD == op_) {
7787        __ vadd(d5, d6, d7);
7788      } else if (Token::SUB == op_) {
7789        __ vsub(d5, d6, d7);
7790      } else {
7791        UNREACHABLE();
7792      }
7793      __ sub(r0, r5, Operand(kHeapObjectTag));
7794      __ vstr(d5, r0, HeapNumber::kValueOffset);
7795      __ add(r0, r0, Operand(kHeapObjectTag));
7796      __ mov(pc, lr);
7797    } else {
7798      // If we did not inline the operation, then the arguments are in:
7799      // r0: Left value (least significant part of mantissa).
7800      // r1: Left value (sign, exponent, top of mantissa).
7801      // r2: Right value (least significant part of mantissa).
7802      // r3: Right value (sign, exponent, top of mantissa).
7803      // r5: Address of heap number for result.
7804
7805      __ push(lr);   // For later.
7806      __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments.
7807      // Call C routine that may not cause GC or other trouble. r5 is callee
7808      // save.
7809      __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
7810      // Store answer in the overwritable heap number.
7811  #if !defined(USE_ARM_EABI)
7812      // Double returned in fp coprocessor register 0 and 1, encoded as register
7813      // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
7814      // subtract the tag from r5.
7815      __ sub(r4, r5, Operand(kHeapObjectTag));
7816      __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
7817  #else
7818      // Double returned in registers 0 and 1.
7819      __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
7820  #endif
7821      __ mov(r0, Operand(r5));
7822      // And we are done.
7823      __ pop(pc);
7824    }
7825  }
7826
7827  if (lhs.is(r0)) {
7828    __ b(&slow);
7829    __ bind(&slow_reverse);
7830    __ Swap(r0, r1, ip);
7831  }
7832
7833  heap_number_map = no_reg;  // Don't use this any more from here on.
7834
7835  // We jump to here if something goes wrong (one param is not a number of any
7836  // sort or new-space allocation fails).
7837  __ bind(&slow);
7838
7839  // Push arguments to the stack
7840  __ Push(r1, r0);
7841
7842  if (Token::ADD == op_) {
7843    // Test for string arguments before calling runtime.
7844    // r1 : first argument
7845    // r0 : second argument
7846    // sp[0] : second argument
7847    // sp[4] : first argument
7848
7849    Label not_strings, not_string1, string1, string1_smi2;
7850    __ tst(r1, Operand(kSmiTagMask));
7851    __ b(eq, &not_string1);
7852    __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
7853    __ b(ge, &not_string1);
7854
7855    // First argument is a string, test second.
7856    __ tst(r0, Operand(kSmiTagMask));
7857    __ b(eq, &string1_smi2);
7858    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
7859    __ b(ge, &string1);
7860
7861    // First and second argument are strings.
7862    StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
7863    __ TailCallStub(&string_add_stub);
7864
7865    __ bind(&string1_smi2);
7866    // First argument is a string, second is a smi.  Try to look up the number
7867    // string for the smi in the number string cache.
7868    NumberToStringStub::GenerateLookupNumberStringCache(
7869        masm, r0, r2, r4, r5, r6, true, &string1);
7870
7871    // Replace the second argument on the stack and tail call the string add
7872    // stub to make the result.
7873    __ str(r2, MemOperand(sp, 0));
7874    __ TailCallStub(&string_add_stub);
7875
7876    // Only first argument is a string.
7877    __ bind(&string1);
7878    __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
7879
7880    // First argument was not a string, test second.
7881    __ bind(&not_string1);
7882    __ tst(r0, Operand(kSmiTagMask));
7883    __ b(eq, &not_strings);
7884    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
7885    __ b(ge, &not_strings);
7886
7887    // Only second argument is a string.
7888    __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
7889
7890    __ bind(&not_strings);
7891  }
7892
7893  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
7894}
7895
7896
7897// Tries to get a signed int32 out of a double precision floating point heap
7898// number.  Rounds towards 0.  Fastest for doubles that are in the ranges
7899// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff.  This corresponds
7900// almost to the range of signed int32 values that are not Smis.  Jumps to the
7901// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
7902// (excluding the endpoints).
7903static void GetInt32(MacroAssembler* masm,
7904                     Register source,
7905                     Register dest,
7906                     Register scratch,
7907                     Register scratch2,
7908                     Label* slow) {
7909  Label right_exponent, done;
7910  // Get exponent word.
7911  __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
7912  // Get exponent alone in scratch2.
7913  __ Ubfx(scratch2,
7914          scratch,
7915          HeapNumber::kExponentShift,
7916          HeapNumber::kExponentBits);
7917  // Load dest with zero.  We use this either for the final shift or
7918  // for the answer.
7919  __ mov(dest, Operand(0));
7920  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
7921  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).  This is
7922  // the exponent that we are fastest at and also the highest exponent we can
7923  // handle here.
7924  const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
7925  // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
7926  // split it up to avoid a constant pool entry.  You can't do that in general
7927  // for cmp because of the overflow flag, but we know the exponent is in the
7928  // range 0-2047 so there is no overflow.
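  // E.g. non_smi_exponent == 1023 + 30 == 1053 == 0x41d, split below into the
  // fudge factor 0x400 plus the residue 0x1d, both encodable immediates.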
7929  int fudge_factor = 0x400;
7930  __ sub(scratch2, scratch2, Operand(fudge_factor));
7931  __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
7932  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
7933  __ b(eq, &right_exponent);
7934  // If the exponent is higher than that then go to slow case.  This catches
7935  // numbers that don't fit in a signed int32, infinities and NaNs.
7936  __ b(gt, slow);
7937
7938  // We know the exponent is smaller than 30 (biased).  If it is less than
7939  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
7940  // it rounds to zero.
7941  const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
7942  __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
7943  // Dest already has a Smi zero.
7944  __ b(lt, &done);
7945  if (!CpuFeatures::IsSupported(VFP3)) {
7946    // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
7947    // get how much to shift down.
7948    __ rsb(dest, scratch2, Operand(30));
7949  }
7950  __ bind(&right_exponent);
7951  if (CpuFeatures::IsSupported(VFP3)) {
7952    CpuFeatures::Scope scope(VFP3);
7953    // ARMv7 VFP3 instructions implementing double precision to integer
7954    // conversion using round to zero.
7955    __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
7956    __ vmov(d7, scratch2, scratch);
7957    __ vcvt_s32_f64(s15, d7);
7958    __ vmov(dest, s15);
7959  } else {
7960    // Get the top bits of the mantissa.
7961    __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
7962    // Put back the implicit 1.
7963    __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
7964    // Shift up the mantissa bits to take up the space the exponent used to
7965    // take. We just orred in the implicit bit so that took care of one and
7966    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
7967    // distance.
7968    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
7969    __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
7970    // Put sign in zero flag.
7971    __ tst(scratch, Operand(HeapNumber::kSignMask));
7972    // Get the second half of the double. For some exponents we don't
7973    // actually need this because the bits get shifted out again, but
7974    // it's probably slower to test than just to do it.
7975    __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
7976    // Shift down 22 bits so only the most significant 10 bits remain.
7977    __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
7978    // Move down according to the exponent.
7979    __ mov(dest, Operand(scratch, LSR, dest));
7980    // Fix sign if sign bit was set.
7981    __ rsb(dest, dest, Operand(0), LeaveCC, ne);
7982  }
7983  __ bind(&done);
7984}
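
// Worked example for the non-VFP path above: the input 2^30 has biased
// exponent 1023 + 30 and a zero mantissa.  The implicit 1 is or'ed in at the
// exponent shift position (bit 20), moved up by 10 to bit 30, and the final
// right shift by dest (zero for this exponent) leaves exactly 0x40000000.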
7985
7986// For bitwise ops where the inputs are not both Smis, we try to determine
7987// whether both inputs are either Smis or at least heap numbers that can be
7988// represented by a 32 bit signed value.  We truncate towards zero as required
7989// by the ES spec.  If this is the case we do the bitwise op and see if the
7990// result is a Smi.  If so, great, otherwise we try to find a heap number to
7991// write the answer into (either by allocating or by overwriting).
7992// On entry the operands are in lhs and rhs.  On exit the answer is in r0.
7993void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
7994                                                Register lhs,
7995                                                Register rhs) {
7996  Label slow, result_not_a_smi;
7997  Label rhs_is_smi, lhs_is_smi;
7998  Label done_checking_rhs, done_checking_lhs;
7999
8000  Register heap_number_map = r6;
8001  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
8002
8003  __ tst(lhs, Operand(kSmiTagMask));
8004  __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
8005  __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
8006  __ cmp(r4, heap_number_map);
8007  __ b(ne, &slow);
8008  GetInt32(masm, lhs, r3, r5, r4, &slow);
8009  __ jmp(&done_checking_lhs);
8010  __ bind(&lhs_is_smi);
8011  __ mov(r3, Operand(lhs, ASR, 1));
8012  __ bind(&done_checking_lhs);
8013
8014  __ tst(rhs, Operand(kSmiTagMask));
8015  __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
8016  __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
8017  __ cmp(r4, heap_number_map);
8018  __ b(ne, &slow);
8019  GetInt32(masm, rhs, r2, r5, r4, &slow);
8020  __ jmp(&done_checking_rhs);
8021  __ bind(&rhs_is_smi);
8022  __ mov(r2, Operand(rhs, ASR, 1));
8023  __ bind(&done_checking_rhs);
8024
8025  ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
8026
8027  // r0 and r1: Original operands (Smi or heap numbers).
8028  // r2 and r3: Signed int32 operands.
8029  switch (op_) {
8030    case Token::BIT_OR:  __ orr(r2, r2, Operand(r3)); break;
8031    case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
8032    case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
8033    case Token::SAR:
8034      // Use only the 5 least significant bits of the shift count.
8035      __ and_(r2, r2, Operand(0x1f));
8036      __ mov(r2, Operand(r3, ASR, r2));
8037      break;
8038    case Token::SHR:
8039      // Use only the 5 least significant bits of the shift count.
8040      __ and_(r2, r2, Operand(0x1f));
8041      __ mov(r2, Operand(r3, LSR, r2), SetCC);
8042      // SHR is special because it is required to produce a positive answer.
8043      // The code below for writing into heap numbers isn't capable of writing
8044      // the register as an unsigned int so we go to slow case if we hit this
8045      // case.
8046      if (CpuFeatures::IsSupported(VFP3)) {
8047        __ b(mi, &result_not_a_smi);
8048      } else {
8049        __ b(mi, &slow);
8050      }
8051      break;
8052    case Token::SHL:
8053      // Use only the 5 least significant bits of the shift count.
8054      __ and_(r2, r2, Operand(0x1f));
8055      __ mov(r2, Operand(r3, LSL, r2));
8056      break;
8057    default: UNREACHABLE();
8058  }
8059  // Check that the *signed* result fits in a smi.
8060  __ add(r3, r2, Operand(0x40000000), SetCC);
8061  __ b(mi, &result_not_a_smi);
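  // Adding 0x40000000 sets the sign bit exactly when r2 lies outside the smi
  // range [-2^30, 2^30 - 1]: e.g. 2^30 + 0x40000000 == 0x80000000 (mi), while
  // -2^30 + 0x40000000 == 0 (pl).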
8062  __ mov(r0, Operand(r2, LSL, kSmiTagSize));
8063  __ Ret();
8064
8065  Label have_to_allocate, got_a_heap_number;
8066  __ bind(&result_not_a_smi);
8067  switch (mode_) {
8068    case OVERWRITE_RIGHT: {
8069      __ tst(rhs, Operand(kSmiTagMask));
8070      __ b(eq, &have_to_allocate);
8071      __ mov(r5, Operand(rhs));
8072      break;
8073    }
8074    case OVERWRITE_LEFT: {
8075      __ tst(lhs, Operand(kSmiTagMask));
8076      __ b(eq, &have_to_allocate);
8077      __ mov(r5, Operand(lhs));
8078      break;
8079    }
8080    case NO_OVERWRITE: {
8081      // Get a new heap number in r5.  r4 and r7 are scratch.
8082      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
      break;
8083    }
8084    default: break;
8085  }
8086  __ bind(&got_a_heap_number);
8087  // r2: Answer as signed int32.
8088  // r5: Heap number to write answer into.
8089
8090  // Nothing can go wrong now, so move the heap number to r0, which is the
8091  // result.
8092  __ mov(r0, Operand(r5));
8093
8094  if (CpuFeatures::IsSupported(VFP3)) {
8095    // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
8096    CpuFeatures::Scope scope(VFP3);
8097    __ vmov(s0, r2);
8098    if (op_ == Token::SHR) {
8099      __ vcvt_f64_u32(d0, s0);
8100    } else {
8101      __ vcvt_f64_s32(d0, s0);
8102    }
8103    __ sub(r3, r0, Operand(kHeapObjectTag));
8104    __ vstr(d0, r3, HeapNumber::kValueOffset);
8105    __ Ret();
8106  } else {
8107    // Tail call that writes the int32 in r2 to the heap number in r0, using
8108    // r3 as scratch.  r0 is preserved and returned.
8109    WriteInt32ToHeapNumberStub stub(r2, r0, r3);
8110    __ TailCallStub(&stub);
8111  }
8112
8113  if (mode_ != NO_OVERWRITE) {
8114    __ bind(&have_to_allocate);
8115    // Get a new heap number in r5.  r4 and r7 are scratch.
8116    __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
8117    __ jmp(&got_a_heap_number);
8118  }
8119
8120  // If all else failed then we go to the runtime system.
8121  __ bind(&slow);
8122  __ Push(lhs, rhs);  // Restore stack.
8123  switch (op_) {
8124    case Token::BIT_OR:
8125      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
8126      break;
8127    case Token::BIT_AND:
8128      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
8129      break;
8130    case Token::BIT_XOR:
8131      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
8132      break;
8133    case Token::SAR:
8134      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
8135      break;
8136    case Token::SHR:
8137      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
8138      break;
8139    case Token::SHL:
8140      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
8141      break;
8142    default:
8143      UNREACHABLE();
8144  }
8145}
8146
8147
8148// Can we multiply by x with at most two shifts and an add?
8149// This answers yes for all integers from 2 to 10.
8150static bool IsEasyToMultiplyBy(int x) {
8151  if (x < 2) return false;                          // Avoid special cases.
8152  if (x > (Smi::kMaxValue + 1) >> 2) return false;  // Almost always overflows.
8153  if (IsPowerOf2(x)) return true;                   // Simple shift.
8154  if (PopCountLessThanEqual2(x)) return true;       // Shift and add and shift.
8155  if (IsPowerOf2(x + 1)) return true;               // Patterns like 11111.
8156  return false;
8157}
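// E.g. 10 == 0b1010 passes the pop-count test (a shift-add-shift sequence),
// 15 == 0b1111 passes the 2^n - 1 test (shift and reverse-subtract), while
// 11 == 0b1011 fails every test and is rejected.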
8158
8159
8160// Can multiply by anything that IsEasyToMultiplyBy returns true for.
8161// Source and destination may be the same register.  This routine does
8162// not set carry and overflow the way a mul instruction would.
8163static void MultiplyByKnownInt(MacroAssembler* masm,
8164                               Register source,
8165                               Register destination,
8166                               int known_int) {
8167  if (IsPowerOf2(known_int)) {
8168    __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
8169  } else if (PopCountLessThanEqual2(known_int)) {
8170    int first_bit = BitPosition(known_int);
8171    int second_bit = BitPosition(known_int ^ (1 << first_bit));
8172    __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
8173    if (first_bit != 0) {
8174      __ mov(destination, Operand(destination, LSL, first_bit));
8175    }
8176  } else {
8177    ASSERT(IsPowerOf2(known_int + 1));  // Patterns like 1111.
8178    int the_bit = BitPosition(known_int + 1);
8179    __ rsb(destination, source, Operand(source, LSL, the_bit));
8180  }
8181}
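// E.g. for known_int == 10 (first_bit == 1, second_bit == 3) this emits
// add(dest, src, src LSL 2), giving 5 * src, then dest LSL 1 for 10 * src.
// For known_int == 7 it emits rsb(dest, src, src LSL 3), i.e. 8 * src - src.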
8182
8183
8184// This function (as opposed to MultiplyByKnownInt) takes the known int in
8185// a register for the cases where it doesn't know a good trick, and may
8186// deliver a result that needs shifting.
8187static void MultiplyByKnownInt2(
8188    MacroAssembler* masm,
8189    Register result,
8190    Register source,
8191    Register known_int_register,   // Smi tagged.
8192    int known_int,
8193    int* required_shift) {  // Including Smi tag shift
8194  switch (known_int) {
8195    case 3:
8196      __ add(result, source, Operand(source, LSL, 1));
8197      *required_shift = 1;
8198      break;
8199    case 5:
8200      __ add(result, source, Operand(source, LSL, 2));
8201      *required_shift = 1;
8202      break;
8203    case 6:
8204      __ add(result, source, Operand(source, LSL, 1));
8205      *required_shift = 2;
8206      break;
8207    case 7:
8208      __ rsb(result, source, Operand(source, LSL, 3));
8209      *required_shift = 1;
8210      break;
8211    case 9:
8212      __ add(result, source, Operand(source, LSL, 3));
8213      *required_shift = 1;
8214      break;
8215    case 10:
8216      __ add(result, source, Operand(source, LSL, 2));
8217      *required_shift = 2;
8218      break;
8219    default:
8220      ASSERT(!IsPowerOf2(known_int));  // That would be very inefficient.
8221      __ mul(result, source, known_int_register);
8222      *required_shift = 0;
8223  }
8224}
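// E.g. known_int == 6: the add produces 3 * source and *required_shift is
// set to 2, so result << 2 == 12 * source -- that is, 6 * source once the
// included smi tag shift of 1 is accounted for.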
8225
8226
8227// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
8228// trick.  See http://en.wikipedia.org/wiki/Divisibility_rule
8229// Takes the sum of the digits base (mask + 1) repeatedly until we have a
8230// number from 0 to mask.  On exit the 'eq' condition flags are set if the
8231// answer is exactly the mask.
8232void IntegerModStub::DigitSum(MacroAssembler* masm,
8233                              Register lhs,
8234                              int mask,
8235                              int shift,
8236                              Label* entry) {
8237  ASSERT(mask > 0);
8238  ASSERT(mask <= 0xff);  // Fits in an immediate, so ip is not needed.
8239  Label loop;
8240  __ bind(&loop);
8241  __ and_(ip, lhs, Operand(mask));
8242  __ add(lhs, ip, Operand(lhs, LSR, shift));
8243  __ bind(entry);
8244  __ cmp(lhs, Operand(mask));
8245  __ b(gt, &loop);
8246}
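// E.g. reducing 25 with mask == 3 and shift == 2 (the mod3 case below):
//   25 -> (25 & 3) + (25 >> 2) == 1 + 6 == 7
//    7 -> (7 & 3) + (7 >> 2) == 3 + 1 == 4
//    4 -> (4 & 3) + (4 >> 2) == 0 + 1 == 1, which is <= 3, ending the loop.
// The result 1 equals 25 % 3; a result equal to the mask means the true
// answer is 0, which the callers below fix up with a conditional subtract.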
8247
8248
8249void IntegerModStub::DigitSum(MacroAssembler* masm,
8250                              Register lhs,
8251                              Register scratch,
8252                              int mask,
8253                              int shift1,
8254                              int shift2,
8255                              Label* entry) {
8256  ASSERT(mask > 0);
8257  ASSERT(mask <= 0xff);  // Fits in an immediate, so ip is not needed.
8258  Label loop;
8259  __ bind(&loop);
8260  __ bic(scratch, lhs, Operand(mask));
8261  __ and_(ip, lhs, Operand(mask));
8262  __ add(lhs, ip, Operand(lhs, LSR, shift1));
8263  __ add(lhs, lhs, Operand(scratch, LSR, shift2));
8264  __ bind(entry);
8265  __ cmp(lhs, Operand(mask));
8266  __ b(gt, &loop);
8267}
8268
8269
8270// Splits the number into two halves (bottom half has shift bits).  The top
8271// half is subtracted from the bottom half.  If the result is negative then
8272// rhs is added.
8273void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
8274                                                Register lhs,
8275                                                int shift,
8276                                                int rhs) {
8277  int mask = (1 << shift) - 1;
8278  __ and_(ip, lhs, Operand(mask));
8279  __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
8280  __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
8281}
8282
8283
8284void IntegerModStub::ModReduce(MacroAssembler* masm,
8285                               Register lhs,
8286                               int max,
8287                               int denominator) {
8288  int limit = denominator;
8289  while (limit * 2 <= max) limit *= 2;
8290  while (limit >= denominator) {
8291    __ cmp(lhs, Operand(limit));
8292    __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
8293    limit >>= 1;
8294  }
8295}
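// E.g. ModReduce(lhs, 0x3f, 11) scales the limit up to 44 and then
// conditionally subtracts 44, 22 and 11, so lhs == 40 becomes
// 40 - 22 - 11 == 7 == 40 % 11.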
8296
8297
8298void IntegerModStub::ModAnswer(MacroAssembler* masm,
8299                               Register result,
8300                               Register shift_distance,
8301                               Register mask_bits,
8302                               Register sum_of_digits) {
8303  __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
8304  __ Ret();
8305}
8306
8307
8308// See comment for class.
8309void IntegerModStub::Generate(MacroAssembler* masm) {
8310  __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
8311  __ bic(odd_number_, odd_number_, Operand(1));
8312  __ mov(odd_number_, Operand(odd_number_, LSL, 1));
8313  // We now have (odd_number_ - 1) * 2 in the register.
8314  // Build a switch out of branches instead of data because it avoids
8315  // having to teach the assembler about intra-code-object pointers
8316  // that are not in relative branch instructions.
8317  Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
8318  Label mod21, mod23, mod25;
8319  { Assembler::BlockConstPoolScope block_const_pool(masm);
8320    __ add(pc, pc, Operand(odd_number_));
8321    // When you read pc it is always 8 ahead, but when you write it you always
8322    // write the actual value.  So we put in two nops to take up the slack.
8323    __ nop();
8324    __ nop();
8325    __ b(&mod3);
8326    __ b(&mod5);
8327    __ b(&mod7);
8328    __ b(&mod9);
8329    __ b(&mod11);
8330    __ b(&mod13);
8331    __ b(&mod15);
8332    __ b(&mod17);
8333    __ b(&mod19);
8334    __ b(&mod21);
8335    __ b(&mod23);
8336    __ b(&mod25);
8337  }
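  // E.g. for an odd divisor of 3 the register holds (3 - 1) * 2 == 4; the
  // add reads pc as its own address + 8 (the second nop), so adding 4 lands
  // exactly on the b(&mod3) slot, adding 8 on b(&mod5), and so on.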
8338
8339  // For each denominator we find a multiple that is almost only ones
8340  // when expressed in binary.  Then we do the sum-of-digits trick for
8341  // that number.  If the multiple is not 1 then we have to do a little
8342  // more work afterwards to get the answer into the 0-denominator-1
8343  // range.
8344  DigitSum(masm, lhs_, 3, 2, &mod3);  // 3 = b11.
8345  __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
8346  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8347
8348  DigitSum(masm, lhs_, 0xf, 4, &mod5);  // 5 * 3 = b1111.
8349  ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
8350  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8351
8352  DigitSum(masm, lhs_, 7, 3, &mod7);  // 7 = b111.
8353  __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
8354  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8355
8356  DigitSum(masm, lhs_, 0x3f, 6, &mod9);  // 7 * 9 = b111111.
8357  ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
8358  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8359
8360  DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11);  // 5 * 11 = b110111.
8361  ModReduce(masm, lhs_, 0x3f, 11);
8362  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8363
8364  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13);  // 19 * 13 = b11110111.
8365  ModReduce(masm, lhs_, 0xff, 13);
8366  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8367
8368  DigitSum(masm, lhs_, 0xf, 4, &mod15);  // 15 = b1111.
8369  __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
8370  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8371
8372  DigitSum(masm, lhs_, 0xff, 8, &mod17);  // 15 * 17 = b11111111.
8373  ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
8374  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8375
8376  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19);  // 13 * 19 = b11110111.
8377  ModReduce(masm, lhs_, 0xff, 19);
8378  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8379
8380  DigitSum(masm, lhs_, 0x3f, 6, &mod21);  // 3 * 21 = b111111.
8381  ModReduce(masm, lhs_, 0x3f, 21);
8382  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8383
8384  DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23);  // 11 * 23 = b11111101.
8385  ModReduce(masm, lhs_, 0xff, 23);
8386  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8387
8388  DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25);  // 5 * 25 = b1111101.
8389  ModReduce(masm, lhs_, 0x7f, 25);
8390  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
8391}
8392
8393
8394const char* GenericBinaryOpStub::GetName() {
8395  if (name_ != NULL) return name_;
8396  const int len = 100;
8397  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
8398  if (name_ == NULL) return "OOM";
8399  const char* op_name = Token::Name(op_);
8400  const char* overwrite_name;
8401  switch (mode_) {
8402    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
8403    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
8404    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
8405    default: overwrite_name = "UnknownOverwrite"; break;
8406  }
8407
8408  OS::SNPrintF(Vector<char>(name_, len),
8409               "GenericBinaryOpStub_%s_%s%s_%s",
8410               op_name,
8411               overwrite_name,
8412               specialized_on_rhs_ ? "_ConstantRhs" : "",
8413               BinaryOpIC::GetName(runtime_operands_type_));
8414  return name_;
8415}
8416
8417
8419void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
8420  // lhs_ : x
8421  // rhs_ : y
8422  // r0   : result
8423
8424  Register result = r0;
8425  Register lhs = lhs_;
8426  Register rhs = rhs_;
8427
8428  // This code can't cope with other register allocations yet.
8429  ASSERT(result.is(r0) &&
8430         ((lhs.is(r0) && rhs.is(r1)) ||
8431          (lhs.is(r1) && rhs.is(r0))));
8432
8433  Register smi_test_reg = VirtualFrame::scratch0();
8434  Register scratch = VirtualFrame::scratch1();
8435
8436  // All ops need to know whether we are dealing with two Smis.  Set up
8437  // smi_test_reg to tell us that.
8438  if (ShouldGenerateSmiCode()) {
8439    __ orr(smi_test_reg, lhs, Operand(rhs));
8440  }
8441
8442  switch (op_) {
8443    case Token::ADD: {
8444      Label not_smi;
8445      // Fast path.
8446      if (ShouldGenerateSmiCode()) {
8447        ASSERT(kSmiTag == 0);  // Adjust code below.
8448        __ tst(smi_test_reg, Operand(kSmiTagMask));
8449        __ b(ne, &not_smi);
8450        __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
8451        // Return if no overflow.
8452        __ Ret(vc);
8453        __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
8454      }
8455      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
8456      break;
8457    }
8458
8459    case Token::SUB: {
8460      Label not_smi;
8461      // Fast path.
8462      if (ShouldGenerateSmiCode()) {
8463        ASSERT(kSmiTag == 0);  // Adjust code below.
8464        __ tst(smi_test_reg, Operand(kSmiTagMask));
8465        __ b(ne, &not_smi);
8466        if (lhs.is(r1)) {
8467          __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
8468          // Return if no overflow.
8469          __ Ret(vc);
8470          __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
8471        } else {
8472          __ sub(r0, r0, Operand(r1), SetCC);  // Subtract y optimistically.
8473          // Return if no overflow.
8474          __ Ret(vc);
8475          __ add(r0, r0, Operand(r1));  // Revert optimistic subtract.
8476        }
8477      }
8478      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
8479      break;
8480    }
8481
8482    case Token::MUL: {
8483      Label not_smi, slow;
8484      if (ShouldGenerateSmiCode()) {
8485        ASSERT(kSmiTag == 0);  // Adjust code below.
8486        __ tst(smi_test_reg, Operand(kSmiTagMask));
8487        Register scratch2 = smi_test_reg;
8488        smi_test_reg = no_reg;
8489        __ b(ne, &not_smi);
8490        // Remove tag from one operand (but keep sign), so that result is Smi.
8491        __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
8492        // Do multiplication
8493        // scratch = lower 32 bits of ip * lhs.
8494        __ smull(scratch, scratch2, lhs, ip);
8495        // Go slow on overflows (overflow bit is not set).
8496        __ mov(ip, Operand(scratch, ASR, 31));
8497        // No overflow if higher 33 bits are identical.
8498        __ cmp(ip, Operand(scratch2));
8499        __ b(ne, &slow);
8500        // Go slow on zero result to handle -0.
8501        __ tst(scratch, Operand(scratch));
8502        __ mov(result, Operand(scratch), LeaveCC, ne);
8503        __ Ret(ne);
8504        // We need -0 if we were multiplying a negative number with 0 to get 0.
8505        // We know one of them was zero.
8506        __ add(scratch2, rhs, Operand(lhs), SetCC);
8507        __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
8508        __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
8509        // Slow case.  We fall through here if we multiplied a negative number
8510        // with 0, because that would mean we should produce -0.
8511        __ bind(&slow);
8512      }
8513      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
8514      break;
8515    }
8516
8517    case Token::DIV:
8518    case Token::MOD: {
8519      Label not_smi;
8520      if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
8521        Label lhs_is_unsuitable;
8522        __ BranchOnNotSmi(lhs, &not_smi);
8523        if (IsPowerOf2(constant_rhs_)) {
8524          if (op_ == Token::MOD) {
8525            __ and_(rhs,
8526                    lhs,
8527                    Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
8528                    SetCC);
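            // E.g. constant_rhs_ == 4 gives the mask 0x80000000 | 7; for the
            // smi-tagged lhs 0b11010 (i.e. 13) the and leaves 0b010, the smi
            // for 1 == 13 % 4, with the sign bit clear.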
8529            // We now have the answer, but if the input was negative we also
8530            // have the sign bit.  Our work is done if the result is
8531            // positive or zero:
8532            if (!rhs.is(r0)) {
8533              __ mov(r0, rhs, LeaveCC, pl);
8534            }
8535            __ Ret(pl);
8536            // A mod of a negative left hand side must return a negative number.
8537            // Unfortunately if the answer is 0 then we must return -0.  And we
8538            // already optimistically trashed rhs so we may need to restore it.
8539            __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
8540            // Next two instructions are conditional on the answer being -0.
8541            __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
8542            __ b(eq, &lhs_is_unsuitable);
8543            // We need to subtract the divisor.  E.g. -3 % 4 == -3.
8544            __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
8545          } else {
8546            ASSERT(op_ == Token::DIV);
8547            __ tst(lhs,
8548                   Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
8549            __ b(ne, &lhs_is_unsuitable);  // Go slow on negative or remainder.
8550            int shift = 0;
8551            int d = constant_rhs_;
8552            while ((d & 1) == 0) {
8553              d >>= 1;
8554              shift++;
8555            }
8556            __ mov(r0, Operand(lhs, LSR, shift));
8557            __ bic(r0, r0, Operand(kSmiTagMask));
8558          }
8559        } else {
8560          // Not a power of 2.
8561          __ tst(lhs, Operand(0x80000000u));
8562          __ b(ne, &lhs_is_unsuitable);
8563          // Find a fixed point reciprocal of the divisor so we can divide by
8564          // multiplying.
8565          double divisor = 1.0 / constant_rhs_;
8566          int shift = 32;
8567          double scale = 4294967296.0;  // 1 << 32.
8568          uint32_t mul;
8569          // Maximise the precision of the fixed point reciprocal.
8570          while (true) {
8571            mul = static_cast<uint32_t>(scale * divisor);
8572            if (mul >= 0x7fffffff) break;
8573            scale *= 2.0;
8574            shift++;
8575          }
8576          mul++;
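          // E.g. constant_rhs_ == 3: the loop stops at scale == 2^33 with
          // mul == 0xaaaaaaab (just over 2^33 / 3) and shift == 33.  Below,
          // umull keeps the top 32 bits of mul * lhs and the extra LSR by
          // (shift - 31) == 2 completes a total shift of 34; since the
          // smi-tagged lhs is 2 * x, this yields (mul * x) >> 33 == x / 3.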
8577          Register scratch2 = smi_test_reg;
8578          smi_test_reg = no_reg;
8579          __ mov(scratch2, Operand(mul));
8580          __ umull(scratch, scratch2, scratch2, lhs);
8581          __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
8582          // scratch2 is lhs / rhs.  scratch2 is not Smi tagged.
8583          // rhs is still the known rhs.  rhs is Smi tagged.
8584          // lhs is still the unknown lhs.  lhs is Smi tagged.
8585          int required_scratch_shift = 0;  // Including the Smi tag shift of 1.
8586          // scratch = scratch2 * rhs.
8587          MultiplyByKnownInt2(masm,
8588                              scratch,
8589                              scratch2,
8590                              rhs,
8591                              constant_rhs_,
8592                              &required_scratch_shift);
8593          // scratch << required_scratch_shift is now the Smi tagged rhs *
8594          // (lhs / rhs) where / indicates integer division.
8595          if (op_ == Token::DIV) {
8596            __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
8597            __ b(ne, &lhs_is_unsuitable);  // There was a remainder.
8598            __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
8599          } else {
8600            ASSERT(op_ == Token::MOD);
8601            __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
8602          }
8603        }
8604        __ Ret();
8605        __ bind(&lhs_is_unsuitable);
8606      } else if (op_ == Token::MOD &&
8607                 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
8608                 runtime_operands_type_ != BinaryOpIC::STRINGS) {
8609        // Do generate a bit of smi code for modulus even though the default
8610        // for modulus is not to do it: since the ARM processor has no
8611        // hardware modulus instruction, checking for smis makes sense.  We
8612        // can handle 1 to 25 times any power of 2.  This covers over half the
8613        // numbers from 1 to 100, including all of the first 25.  (Actually
8614        // the constants < 10 are handled above by reciprocal multiplication.
8615        // We only get here for those cases if the right hand side is not a
8616        // constant, or for cases like 192, which is 3 * 2^6 and ends up in
8617        // the 3 case of the integer mod stub.)
8618        Label slow;
8619        Label not_power_of_2;
8620        ASSERT(!ShouldGenerateSmiCode());
8621        ASSERT(kSmiTag == 0);  // Adjust code below.
8622        // Check for two positive smis.
8623        __ orr(smi_test_reg, lhs, Operand(rhs));
8624        __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
8625        __ b(ne, &slow);
8626        // Check that rhs is a power of two and not zero.
8627        Register mask_bits = r3;
8628        __ sub(scratch, rhs, Operand(1), SetCC);
8629        __ b(mi, &slow);
8630        __ and_(mask_bits, rhs, Operand(scratch), SetCC);
8631        __ b(ne, &not_power_of_2);
8632        // Calculate power of two modulus.
8633        __ and_(result, lhs, Operand(scratch));
8634        __ Ret();
8635
8636        __ bind(&not_power_of_2);
8637        __ eor(scratch, scratch, Operand(mask_bits));
8638        // At least two bits are set in the modulus.  The high one(s) are in
8639        // mask_bits and the low one is scratch + 1.
8640        __ and_(mask_bits, scratch, Operand(lhs));
8641        Register shift_distance = scratch;
8642        scratch = no_reg;
8643
8644        // The rhs consists of a power of 2 multiplied by some odd number.
8645        // The power-of-2 part we handle by putting the corresponding bits
8646        // from the lhs in the mask_bits register, and the power in the
8647        // shift_distance register.  Shift distance is never 0 due to Smi
8648        // tagging.
8649        __ CountLeadingZeros(r4, shift_distance, shift_distance);
8650        __ rsb(shift_distance, r4, Operand(32));
8651
8652        // Now we need to find out what the odd number is. The last bit is
8653        // always 1.
8654        Register odd_number = r4;
8655        __ mov(odd_number, Operand(rhs, LSR, shift_distance));
8656        __ cmp(odd_number, Operand(25));
8657        __ b(gt, &slow);
8658
8659        IntegerModStub stub(
8660            result, shift_distance, odd_number, mask_bits, lhs, r5);
8661        __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);  // Tail call.
8662
8663        __ bind(&slow);
8664      }
8665      HandleBinaryOpSlowCases(
8666          masm,
8667          &not_smi,
8668          lhs,
8669          rhs,
8670          op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
8671      break;
8672    }
8673
8674    case Token::BIT_OR:
8675    case Token::BIT_AND:
8676    case Token::BIT_XOR:
8677    case Token::SAR:
8678    case Token::SHR:
8679    case Token::SHL: {
8680      Label slow;
8681      ASSERT(kSmiTag == 0);  // Adjust code below.
8682      __ tst(smi_test_reg, Operand(kSmiTagMask));
8683      __ b(ne, &slow);
8684      Register scratch2 = smi_test_reg;
8685      smi_test_reg = no_reg;
8686      switch (op_) {
8687        case Token::BIT_OR:  __ orr(result, rhs, Operand(lhs)); break;
8688        case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
8689        case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
8690        case Token::SAR:
8691          // Remove tags from right operand.
8692          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
8693          __ mov(result, Operand(lhs, ASR, scratch2));
8694          // Smi tag result.
8695          __ bic(result, result, Operand(kSmiTagMask));
8696          break;
8697        case Token::SHR:
8698          // Remove tags from operands.  We can't do this on a 31 bit number
8699          // because then the 0s get shifted into bit 30 instead of bit 31.
8700          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
8701          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
8702          __ mov(scratch, Operand(scratch, LSR, scratch2));
8703          // Unsigned shift is not allowed to produce a negative number, so
8704          // check the sign bit and the sign bit after Smi tagging.
8705          __ tst(scratch, Operand(0xc0000000));
8706          __ b(ne, &slow);
8707          // Smi tag result.
8708          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
8709          break;
8710        case Token::SHL:
8711          // Remove tags from operands.
8712          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
8713          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
8714          __ mov(scratch, Operand(scratch, LSL, scratch2));
8715          // Check that the signed result fits in a Smi.
8716          __ add(scratch2, scratch, Operand(0x40000000), SetCC);
8717          __ b(mi, &slow);
8718          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
8719          break;
8720        default: UNREACHABLE();
8721      }
8722      __ Ret();
8723      __ bind(&slow);
8724      HandleNonSmiBitwiseOp(masm, lhs, rhs);
8725      break;
8726    }
8727
8728    default: UNREACHABLE();
8729  }
8730  // This code should be unreachable.
8731  __ stop("Unreachable");
8732
8733  // Generate an unreachable reference to the DEFAULT stub so that it can be
8734  // found at the end of this stub when clearing ICs at GC.
8735  // TODO(kaznacheev): Check performance impact and get rid of this.
8736  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
8737    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
8738    __ CallStub(&uninit);
8739  }
8740}
8741
8742
8743void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
8744  Label get_result;
8745
8746  __ Push(r1, r0);
8747
8748  // Internal frame is necessary to handle exceptions properly.
8749  __ EnterInternalFrame();
8750  // Call the stub proper to get the result in r0.
8751  __ Call(&get_result);
8752  __ LeaveInternalFrame();
8753
8754  __ push(r0);
8755
8756  __ mov(r0, Operand(Smi::FromInt(MinorKey())));
8757  __ push(r0);
8758  __ mov(r0, Operand(Smi::FromInt(op_)));
8759  __ push(r0);
8760  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
8761  __ push(r0);
8762
8763  __ TailCallExternalReference(
8764      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
8765      6,
8766      1);
8767
8768  // The entry point for the result calculation is assumed to be immediately
8769  // after this sequence.
8770  __ bind(&get_result);
8771}
8772
8773
8774Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
8775  GenericBinaryOpStub stub(key, type_info);
8776  return stub.GetCode();
8777}
8778
8779
8780void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
8781  // Argument is a number and is on stack and in r0.
8782  Label runtime_call;
8783  Label input_not_smi;
8784  Label loaded;
8785
8786  if (CpuFeatures::IsSupported(VFP3)) {
8787    // Load argument and check if it is a smi.
8788    __ BranchOnNotSmi(r0, &input_not_smi);
8789
8790    CpuFeatures::Scope scope(VFP3);
8791    // Input is a smi. Convert to double and load the low and high words
8792    // of the double into r2, r3.
8793    __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
8794    __ b(&loaded);
8795
8796    __ bind(&input_not_smi);
8797    // Check if input is a HeapNumber.
8798    __ CheckMap(r0,
8799                r1,
8800                Heap::kHeapNumberMapRootIndex,
8801                &runtime_call,
8802                true);
8803    // Input is a HeapNumber. Load it to a double register and store the
8804    // low and high words into r2, r3.
8805    __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
8806
8807    __ bind(&loaded);
8808    // r2 = low 32 bits of double value
8809    // r3 = high 32 bits of double value
8810    // Compute hash (the shifts are arithmetic):
8811    //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
8812    __ eor(r1, r2, Operand(r3));
8813    __ eor(r1, r1, Operand(r1, ASR, 16));
8814    __ eor(r1, r1, Operand(r1, ASR, 8));
8815    ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
8816    __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
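    // E.g. for the double 1.0 (high word 0x3ff00000, low word 0) the xor
    // cascade folds to 0x3fcfcfcf before the mask selects the cache index.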
8817
8818    // r2 = low 32 bits of double value.
8819    // r3 = high 32 bits of double value.
8820    // r1 = TranscendentalCache::hash(double value).
8821    __ mov(r0,
8822           Operand(ExternalReference::transcendental_cache_array_address()));
8823    // r0 points to cache array.
8824    __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
8825    // r0 points to the cache for the type type_.
8826    // If NULL, the cache hasn't been initialized yet, so go through runtime.
8827    __ cmp(r0, Operand(0));
8828    __ b(eq, &runtime_call);
8829
8830#ifdef DEBUG
8831    // Check that the layout of cache elements match expectations.
8832    { TranscendentalCache::Element test_elem[2];
8833      char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
8834      char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
8835      char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
8836      char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
8837      char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
8838      CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
8839      CHECK_EQ(0, elem_in0 - elem_start);
8840      CHECK_EQ(kIntSize, elem_in1 - elem_start);
8841      CHECK_EQ(2 * kIntSize, elem_out - elem_start);
8842    }
8843#endif
8844
8845    // Find the address of entry r1 in the cache, i.e., &r0[r1 * 12].
8846    __ add(r1, r1, Operand(r1, LSL, 1));
8847    __ add(r0, r0, Operand(r1, LSL, 2));
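    // r1 was tripled and then scaled by 4, i.e. r0 advances by r1 * 12 bytes,
    // matching the 12-byte element size checked above.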
8848    // Check if cache matches: Double value is stored in uint32_t[2] array.
8849    __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit());
8850    __ cmp(r2, r4);
8851    __ b(ne, &runtime_call);
8852    __ cmp(r3, r5);
8853    __ b(ne, &runtime_call);
8854    // Cache hit. Load result, pop argument and return.
8855    __ mov(r0, Operand(r6));
8856    __ pop();
8857    __ Ret();
8858  }
8859
8860  __ bind(&runtime_call);
8861  __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
8862}
8863
8864
8865Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
8866  switch (type_) {
8867    // Add more cases when necessary.
8868    case TranscendentalCache::SIN: return Runtime::kMath_sin;
8869    case TranscendentalCache::COS: return Runtime::kMath_cos;
8870    default:
8871      UNIMPLEMENTED();
8872      return Runtime::kAbort;
8873  }
8874}
8875
8876
8877void StackCheckStub::Generate(MacroAssembler* masm) {
8878  // Do tail-call to runtime routine.  Runtime routines expect at least one
8879  // argument, so give it a Smi.
8880  __ mov(r0, Operand(Smi::FromInt(0)));
8881  __ push(r0);
8882  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
8883
8884  __ StubReturn(1);
8885}
8886
8887
8888void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
8889  Label slow, done;
8890
8891  Register heap_number_map = r6;
8892  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
8893
8894  if (op_ == Token::SUB) {
8895    // Check whether the value is a smi.
8896    Label try_float;
8897    __ tst(r0, Operand(kSmiTagMask));
8898    __ b(ne, &try_float);
8899
8900    // Go slow case if the value of the expression is zero
8901    // to make sure that we switch between 0 and -0.
8902    __ cmp(r0, Operand(0));
8903    __ b(eq, &slow);
8904
8905    // The value of the expression is a smi that is not zero.  Try
8906    // optimistic subtraction '0 - value'.
8907    __ rsb(r1, r0, Operand(0), SetCC);
8908    __ b(vs, &slow);
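    // The reverse subtraction computes 0 - r0; it overflows (V set) only for
    // the tagged value 0x80000000, the smallest smi, whose negation does not
    // fit in a smi and therefore must take the slow case.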
8909
8910    __ mov(r0, Operand(r1));  // Set r0 to result.
8911    __ b(&done);
8912
8913    __ bind(&try_float);
8914    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
8915    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
8916    __ cmp(r1, heap_number_map);
8917    __ b(ne, &slow);
8918    // r0 is a heap number.  Get a new heap number in r1.
8919    if (overwrite_) {
8920      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
8921      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
8922      __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
8923    } else {
8924      __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
8925      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
8926      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
8927      __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
8928      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
8929      __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
8930      __ mov(r0, Operand(r1));
8931    }
8932  } else if (op_ == Token::BIT_NOT) {
8933    // Check if the operand is a heap number.
8934    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
8935    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
8936    __ cmp(r1, heap_number_map);
8937    __ b(ne, &slow);
8938
8939    // Convert the heap number in r0 to an untagged integer in r1.
8940    GetInt32(masm, r0, r1, r2, r3, &slow);
8941
8942    // Do the bitwise operation (move negated) and check if the result
8943    // fits in a smi.
8944    Label try_float;
8945    __ mvn(r1, Operand(r1));
8946    __ add(r2, r1, Operand(0x40000000), SetCC);
8947    __ b(mi, &try_float);
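    // Adding 0x40000000 produces a negative result (mi) exactly when r1 lies
    // outside the smi range [-2^30, 2^30 - 1]; for example, 2^30 becomes
    // 0x80000000, which has the sign bit set.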
8948    __ mov(r0, Operand(r1, LSL, kSmiTagSize));
8949    __ b(&done);
8950
8951    __ bind(&try_float);
8952    if (!overwrite_) {
8953      // Allocate a fresh heap number, but don't overwrite r0 until
8954      // we're sure we can do it without going through the slow case
8955      // that needs the value in r0.
8956      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
8957      __ mov(r0, Operand(r2));
8958    }
8959
8960    if (CpuFeatures::IsSupported(VFP3)) {
8961      // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
8962      CpuFeatures::Scope scope(VFP3);
8963      __ vmov(s0, r1);
8964      __ vcvt_f64_s32(d0, s0);
8965      __ sub(r2, r0, Operand(kHeapObjectTag));
8966      __ vstr(d0, r2, HeapNumber::kValueOffset);
8967    } else {
8968      // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
8969      // have to set up a frame.
8970      WriteInt32ToHeapNumberStub stub(r1, r0, r2);
8971      __ push(lr);
8972      __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
8973      __ pop(lr);
8974    }
8975  } else {
8976    UNIMPLEMENTED();
8977  }
8978
8979  __ bind(&done);
8980  __ StubReturn(1);
8981
8982  // Handle the slow case by jumping to the JavaScript builtin.
8983  __ bind(&slow);
8984  __ push(r0);
8985  switch (op_) {
8986    case Token::SUB:
8987      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
8988      break;
8989    case Token::BIT_NOT:
8990      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
8991      break;
8992    default:
8993      UNREACHABLE();
8994  }
8995}
8996
8997
8998void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
8999  // r0 holds the exception.
9000
9001  // Adjust this code if not the case.
9002  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
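  // Per this ASSERT and the offset ASSERTs below, a stack handler occupies
  // four words: [next handler, state, fp, pc], with the next link at offset 0.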
9003
9004  // Drop the sp to the top of the handler.
9005  __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
9006  __ ldr(sp, MemOperand(r3));
9007
9008  // Restore the next handler and frame pointer, discard handler state.
9009  ASSERT(StackHandlerConstants::kNextOffset == 0);
9010  __ pop(r2);
9011  __ str(r2, MemOperand(r3));
9012  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
9013  __ ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state.
9014
9015  // Before returning we restore the context from the frame pointer if
9016  // not NULL.  The frame pointer is NULL in the exception handler of a
9017  // JS entry frame.
9018  __ cmp(fp, Operand(0));
9019  // Set cp to NULL if fp is NULL.
9020  __ mov(cp, Operand(0), LeaveCC, eq);
9021  // Restore cp otherwise.
9022  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
9023#ifdef DEBUG
9024  if (FLAG_debug_code) {
9025    __ mov(lr, Operand(pc));
9026  }
9027#endif
9028  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
9029  __ pop(pc);
9030}
9031
9032
9033void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
9034                                          UncatchableExceptionType type) {
9035  // Adjust this code if not the case.
9036  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
9037
9038  // Drop sp to the top stack handler.
9039  __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
9040  __ ldr(sp, MemOperand(r3));
9041
9042  // Unwind the handlers until the ENTRY handler is found.
9043  Label loop, done;
9044  __ bind(&loop);
9045  // Load the type of the current stack handler.
9046  const int kStateOffset = StackHandlerConstants::kStateOffset;
9047  __ ldr(r2, MemOperand(sp, kStateOffset));
9048  __ cmp(r2, Operand(StackHandler::ENTRY));
9049  __ b(eq, &done);
9050  // Fetch the next handler in the list.
9051  const int kNextOffset = StackHandlerConstants::kNextOffset;
9052  __ ldr(sp, MemOperand(sp, kNextOffset));
9053  __ jmp(&loop);
9054  __ bind(&done);
9055
9056  // Set the top handler address to the next handler past the current ENTRY handler.
9057  ASSERT(StackHandlerConstants::kNextOffset == 0);
9058  __ pop(r2);
9059  __ str(r2, MemOperand(r3));
9060
9061  if (type == OUT_OF_MEMORY) {
9062    // Set external caught exception to false.
9063    ExternalReference external_caught(Top::k_external_caught_exception_address);
9064    __ mov(r0, Operand(false));
9065    __ mov(r2, Operand(external_caught));
9066    __ str(r0, MemOperand(r2));
9067
9068    // Set pending exception and r0 to out of memory exception.
9069    Failure* out_of_memory = Failure::OutOfMemoryException();
9070    __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
9071    __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
9072    __ str(r0, MemOperand(r2));
9073  }
9074
9075  // Stack layout at this point. See also StackHandlerConstants.
9076  // sp ->   state (ENTRY)
9077  //         fp
9078  //         lr
9079
9080  // Discard handler state (r2 is not used) and restore frame pointer.
9081  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
9082  __ ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
9083  // Before returning we restore the context from the frame pointer if
9084  // not NULL.  The frame pointer is NULL in the exception handler of a
9085  // JS entry frame.
9086  __ cmp(fp, Operand(0));
9087  // Set cp to NULL if fp is NULL.
9088  __ mov(cp, Operand(0), LeaveCC, eq);
9089  // Restore cp otherwise.
9090  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
9091#ifdef DEBUG
9092  if (FLAG_debug_code) {
9093    __ mov(lr, Operand(pc));
9094  }
9095#endif
9096  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
9097  __ pop(pc);
9098}
9099
9100
9101void CEntryStub::GenerateCore(MacroAssembler* masm,
9102                              Label* throw_normal_exception,
9103                              Label* throw_termination_exception,
9104                              Label* throw_out_of_memory_exception,
9105                              bool do_gc,
9106                              bool always_allocate,
9107                              int frame_alignment_skew) {
9108  // r0: result parameter for PerformGC, if any
9109  // r4: number of arguments including receiver  (C callee-saved)
9110  // r5: pointer to builtin function  (C callee-saved)
9111  // r6: pointer to the first argument (C callee-saved)
9112
9113  if (do_gc) {
9114    // Passing r0.
9115    __ PrepareCallCFunction(1, r1);
9116    __ CallCFunction(ExternalReference::perform_gc_function(), 1);
9117  }
9118
9119  ExternalReference scope_depth =
9120      ExternalReference::heap_always_allocate_scope_depth();
9121  if (always_allocate) {
9122    __ mov(r0, Operand(scope_depth));
9123    __ ldr(r1, MemOperand(r0));
9124    __ add(r1, r1, Operand(1));
9125    __ str(r1, MemOperand(r0));
9126  }
9127
9128  // Call C built-in.
9129  // r0 = argc, r1 = argv
9130  __ mov(r0, Operand(r4));
9131  __ mov(r1, Operand(r6));
9132
9133  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
9134  int frame_alignment_mask = frame_alignment - 1;
9135#if defined(V8_HOST_ARCH_ARM)
9136  if (FLAG_debug_code) {
9137    if (frame_alignment > kPointerSize) {
9138      Label alignment_as_expected;
9139      ASSERT(IsPowerOf2(frame_alignment));
9140      __ sub(r2, sp, Operand(frame_alignment_skew));
9141      __ tst(r2, Operand(frame_alignment_mask));
9142      __ b(eq, &alignment_as_expected);
9143      // Don't use Check here, as it will call Runtime_Abort, re-entering here.
9144      __ stop("Unexpected alignment");
9145      __ bind(&alignment_as_expected);
9146    }
9147  }
9148#endif
9149
9150  // Just before the call (jump) below, lr is pushed, so the actual alignment
9151  // is the current skew plus one pointer size.
9152  int alignment_before_call =
9153      (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
9154  if (alignment_before_call > 0) {
9155    // Push until the alignment before the call is met.
9156    __ mov(r2, Operand(0));
9157    for (int i = alignment_before_call;
9158        (i & frame_alignment_mask) != 0;
9159        i += kPointerSize) {
9160      __ push(r2);
9161    }
9162  }
9163
9164  // TODO(1242173): To let the GC traverse the return address of the exit
9165  // frames, we need to know where the return address is. Right now,
9166  // we push it on the stack to be able to find it again, but we never
9167  // restore from it in case of changes, which makes it impossible to
9168  // support moving the C entry code stub. This should be fixed, but currently
9169  // this is OK because the CEntryStub gets generated so early in the V8 boot
9170  // sequence that it is never moved.
9171  masm->add(lr, pc, Operand(4));  // Compute return address: (pc + 8) + 4
9172  masm->push(lr);
9173  masm->Jump(r5);
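  // On ARM, reading pc yields the address of the current instruction plus 8,
  // so lr is the address of the add above plus 12: past the add (+0), the
  // push (+4) and the jump (+8), i.e. exactly the return point.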
9174
9175  // Restore sp back to before aligning the stack.
9176  if (alignment_before_call > 0) {
9177    __ add(sp, sp, Operand(alignment_before_call));
9178  }
9179
9180  if (always_allocate) {
9181    // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
9182    // though (they contain the result).
9183    __ mov(r2, Operand(scope_depth));
9184    __ ldr(r3, MemOperand(r2));
9185    __ sub(r3, r3, Operand(1));
9186    __ str(r3, MemOperand(r2));
9187  }
9188
9189  // Check for a failure result.
9190  Label failure_returned;
9191  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
9192  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
9193  __ add(r2, r0, Operand(1));
9194  __ tst(r2, Operand(kFailureTagMask));
9195  __ b(eq, &failure_returned);
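  // Worked example, assuming the usual two-bit failure tag 0b11: a failure
  // pointer ends in ...11, so adding 1 clears both low bits, whereas a smi
  // (...0) or a heap pointer (...01) plus 1 keeps a nonzero bit in the mask.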
9196
9197  // Exit C frame and return.
9198  // r0:r1: result
9199  // sp: stack pointer
9200  // fp: frame pointer
9201  __ LeaveExitFrame(mode_);
9202
9203  // Check if we should retry or throw an exception.
9204  Label retry;
9205  __ bind(&failure_returned);
9206  ASSERT(Failure::RETRY_AFTER_GC == 0);
9207  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
9208  __ b(eq, &retry);
9209
9210  // Special handling of out of memory exceptions.
9211  Failure* out_of_memory = Failure::OutOfMemoryException();
9212  __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
9213  __ b(eq, throw_out_of_memory_exception);
9214
9215  // Retrieve the pending exception and clear the variable.
9216  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
9217  __ ldr(r3, MemOperand(ip));
9218  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
9219  __ ldr(r0, MemOperand(ip));
9220  __ str(r3, MemOperand(ip));
9221
9222  // Special handling of termination exceptions which are uncatchable
9223  // by javascript code.
9224  __ cmp(r0, Operand(Factory::termination_exception()));
9225  __ b(eq, throw_termination_exception);
9226
9227  // Handle normal exception.
9228  __ jmp(throw_normal_exception);
9229
9230  __ bind(&retry);  // pass last failure (r0) as parameter (r0) when retrying
9231}
9232
9233
9234void CEntryStub::Generate(MacroAssembler* masm) {
9235  // Called from JavaScript; parameters are on stack as if calling JS function
9236  // r0: number of arguments including receiver
9237  // r1: pointer to builtin function
9238  // fp: frame pointer  (restored after C call)
9239  // sp: stack pointer  (restored as callee's sp after C call)
9240  // cp: current context  (C callee-saved)
9241
9242  // Result returned in r0 or r0+r1 by default.
9243
9244  // NOTE: Invocations of builtins may return failure objects
9245  // instead of a proper result. The builtin entry handles
9246  // this by performing a garbage collection and retrying the
9247  // builtin once.
9248
9249  // Enter the exit frame that transitions from JavaScript to C++.
9250  __ EnterExitFrame(mode_);
9251
9252  // r4: number of arguments (C callee-saved)
9253  // r5: pointer to builtin function (C callee-saved)
9254  // r6: pointer to first argument (C callee-saved)
9255
9256  Label throw_normal_exception;
9257  Label throw_termination_exception;
9258  Label throw_out_of_memory_exception;
9259
9260  // Call into the runtime system.
9261  GenerateCore(masm,
9262               &throw_normal_exception,
9263               &throw_termination_exception,
9264               &throw_out_of_memory_exception,
9265               false,
9266               false,
9267               -kPointerSize);
9268
9269  // Do space-specific GC and retry runtime call.
9270  GenerateCore(masm,
9271               &throw_normal_exception,
9272               &throw_termination_exception,
9273               &throw_out_of_memory_exception,
9274               true,
9275               false,
9276               0);
9277
9278  // Do full GC and retry runtime call one final time.
9279  Failure* failure = Failure::InternalError();
9280  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
9281  GenerateCore(masm,
9282               &throw_normal_exception,
9283               &throw_termination_exception,
9284               &throw_out_of_memory_exception,
9285               true,
9286               true,
9287               kPointerSize);
9288
9289  __ bind(&throw_out_of_memory_exception);
9290  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
9291
9292  __ bind(&throw_termination_exception);
9293  GenerateThrowUncatchable(masm, TERMINATION);
9294
9295  __ bind(&throw_normal_exception);
9296  GenerateThrowTOS(masm);
9297}
9298
9299
9300void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
9301  // r0: code entry
9302  // r1: function
9303  // r2: receiver
9304  // r3: argc
9305  // [sp+0]: argv
9306
9307  Label invoke, exit;
9308
9309  // Called from C, so do not pop argc and args on exit (preserve sp)
9310  // No need to save register-passed args
9311  // Save callee-saved registers (incl. cp and fp), sp, and lr
9312  __ stm(db_w, sp, kCalleeSaved | lr.bit());
9313
9314  // Get address of argv, see stm above.
9315  // r0: code entry
9316  // r1: function
9317  // r2: receiver
9318  // r3: argc
9319  __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize));  // argv
9320
9321  // Push a frame with special values setup to mark it as an entry frame.
9322  // r0: code entry
9323  // r1: function
9324  // r2: receiver
9325  // r3: argc
9326  // r4: argv
9327  __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used.
9328  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
9329  __ mov(r7, Operand(Smi::FromInt(marker)));
9330  __ mov(r6, Operand(Smi::FromInt(marker)));
9331  __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
9332  __ ldr(r5, MemOperand(r5));
9333  __ Push(r8, r7, r6, r5);
9334
9335  // Setup frame pointer for the frame to be pushed.
9336  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
9337
9338  // Call a faked try-block that does the invoke.
9339  __ bl(&invoke);
9340
9341  // Caught exception: Store result (exception) in the pending
9342  // exception field in the JSEnv and return a failure sentinel.
9343  // Coming in here the fp will be invalid because the PushTryHandler below
9344  // sets it to 0 to signal the existence of the JSEntry frame.
9345  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
9346  __ str(r0, MemOperand(ip));
9347  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
9348  __ b(&exit);
9349
9350  // Invoke: Link this frame into the handler chain.
9351  __ bind(&invoke);
9352  // Must preserve r0-r4, r5-r7 are available.
9353  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
9354  // If an exception not caught by another handler occurs, this handler
9355  // returns control to the code after the bl(&invoke) above, which
9356  // restores all kCalleeSaved registers (including cp and fp) to their
9357  // saved values before returning a failure to C.
9358
9359  // Clear any pending exceptions.
9360  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
9361  __ ldr(r5, MemOperand(ip));
9362  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
9363  __ str(r5, MemOperand(ip));
9364
9365  // Invoke the function by calling through JS entry trampoline builtin.
9366  // Notice that we cannot store a reference to the trampoline code directly in
9367  // this stub, because runtime stubs are not traversed when doing GC.
9368
9369  // Expected registers by Builtins::JSEntryTrampoline
9370  // r0: code entry
9371  // r1: function
9372  // r2: receiver
9373  // r3: argc
9374  // r4: argv
9375  if (is_construct) {
9376    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
9377    __ mov(ip, Operand(construct_entry));
9378  } else {
9379    ExternalReference entry(Builtins::JSEntryTrampoline);
9380    __ mov(ip, Operand(entry));
9381  }
9382  __ ldr(ip, MemOperand(ip));  // deref address
9383
9384  // Branch and link to JSEntryTrampoline.  We don't use the double underscore
9385  // macro for the add instruction because we don't want the coverage tool
9386  // inserting instructions here after we read the pc.
9387  __ mov(lr, Operand(pc));
9388  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
9389
9390  // Unlink this frame from the handler chain. When reading the
9391  // address of the next handler, there is no need to use the address
9392  // displacement since the current stack pointer (sp) points directly
9393  // to the stack handler.
9394  __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
9395  __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
9396  __ str(r3, MemOperand(ip));
9397  // No need to restore registers
9398  __ add(sp, sp, Operand(StackHandlerConstants::kSize));
9399
9400
9401  __ bind(&exit);  // r0 holds result
9402  // Restore the top frame descriptors from the stack.
9403  __ pop(r3);
9404  __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
9405  __ str(r3, MemOperand(ip));
9406
9407  // Reset the stack to the callee saved registers.
9408  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
9409
9410  // Restore callee-saved registers and return.
9411#ifdef DEBUG
9412  if (FLAG_debug_code) {
9413    __ mov(lr, Operand(pc));
9414  }
9415#endif
9416  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
9417}
9418
9419
9420// This stub performs an instanceof, calling the builtin function if
9421// necessary.  Uses r1 for the object, r0 for the function that it may
9422// be an instance of (these are fetched from the stack).
9423void InstanceofStub::Generate(MacroAssembler* masm) {
9424  // Get the object - slow case for smis (we may need to throw an exception
9425  // depending on the rhs).
9426  Label slow, loop, is_instance, is_not_instance;
9427  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
9428  __ BranchOnSmi(r0, &slow);
9429
9430  // Check that the left hand is a JS object and put map in r3.
9431  __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
9432  __ b(lt, &slow);
9433  __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
9434  __ b(gt, &slow);
9435
9436  // Get the prototype of the function (r4 is result, r2 is scratch).
9437  __ ldr(r1, MemOperand(sp, 0));
9438  // r1 is function, r3 is map.
9439
9440  // Look up the function and the map in the instanceof cache.
9441  Label miss;
9442  __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
9443  __ cmp(r1, ip);
9444  __ b(ne, &miss);
9445  __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
9446  __ cmp(r3, ip);
9447  __ b(ne, &miss);
9448  __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
9449  __ pop();
9450  __ pop();
9451  __ mov(pc, Operand(lr));
9452
9453  __ bind(&miss);
9454  __ TryGetFunctionPrototype(r1, r4, r2, &slow);
9455
9456  // Check that the function prototype is a JS object.
9457  __ BranchOnSmi(r4, &slow);
9458  __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
9459  __ b(lt, &slow);
9460  __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
9461  __ b(gt, &slow);
9462
9463  __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex);
9464  __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex);
9465
9466  // Register mapping: r3 is object map and r4 is function prototype.
9467  // Get prototype of object into r2.
9468  __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
9469
9470  // Loop through the prototype chain looking for the function prototype.
9471  __ bind(&loop);
9472  __ cmp(r2, Operand(r4));
9473  __ b(eq, &is_instance);
9474  __ LoadRoot(ip, Heap::kNullValueRootIndex);
9475  __ cmp(r2, ip);
9476  __ b(eq, &is_not_instance);
9477  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
9478  __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
9479  __ jmp(&loop);
9480
9481  __ bind(&is_instance);
9482  __ mov(r0, Operand(Smi::FromInt(0)));
9483  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
9484  __ pop();
9485  __ pop();
9486  __ mov(pc, Operand(lr));  // Return.
9487
9488  __ bind(&is_not_instance);
9489  __ mov(r0, Operand(Smi::FromInt(1)));
9490  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
9491  __ pop();
9492  __ pop();
9493  __ mov(pc, Operand(lr));  // Return.
9494
9495  // Slow-case.  Tail call builtin.
9496  __ bind(&slow);
9497  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
9498}
9499
9500
9501void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
9502  // The displacement is the offset of the last parameter (if any)
9503  // relative to the frame pointer.
9504  static const int kDisplacement =
9505      StandardFrameConstants::kCallerSPOffset - kPointerSize;
9506
9507  // Check that the key is a smi.
9508  Label slow;
9509  __ BranchOnNotSmi(r1, &slow);
9510
9511  // Check if the calling frame is an arguments adaptor frame.
9512  Label adaptor;
9513  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
9514  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
9515  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
9516  __ b(eq, &adaptor);
9517
9518  // Check index against formal parameters count limit passed in
9519  // through register r0. Use unsigned comparison to get negative
9520  // check for free.
9521  __ cmp(r1, r0);
9522  __ b(cs, &slow);
9523
9524  // Read the argument from the stack and return it.
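  // Both r0 and r1 are smis, so r0 - r1 is the smi-tagged element distance
  // (2 * n); shifting it by kPointerSizeLog2 - kSmiTagSize turns it into the
  // byte offset 4 * n in a single step on ARM.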
9525  __ sub(r3, r0, r1);
9526  __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
9527  __ ldr(r0, MemOperand(r3, kDisplacement));
9528  __ Jump(lr);
9529
9530  // Arguments adaptor case: Check index against actual arguments
9531  // limit found in the arguments adaptor frame. Use unsigned
9532  // comparison to get negative check for free.
9533  __ bind(&adaptor);
9534  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
9535  __ cmp(r1, r0);
9536  __ b(cs, &slow);
9537
9538  // Read the argument from the adaptor frame and return it.
9539  __ sub(r3, r0, r1);
9540  __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
9541  __ ldr(r0, MemOperand(r3, kDisplacement));
9542  __ Jump(lr);
9543
9544  // Slow-case: Handle non-smi or out-of-bounds access to arguments
9545  // by calling the runtime system.
9546  __ bind(&slow);
9547  __ push(r1);
9548  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
9549}
9550
9551
9552void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
9553  // sp[0] : number of parameters
9554  // sp[4] : receiver displacement
9555  // sp[8] : function
9556
9557  // Check if the calling frame is an arguments adaptor frame.
9558  Label adaptor_frame, try_allocate, runtime;
9559  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
9560  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
9561  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
9562  __ b(eq, &adaptor_frame);
9563
9564  // Get the length from the frame.
9565  __ ldr(r1, MemOperand(sp, 0));
9566  __ b(&try_allocate);
9567
9568  // Patch the arguments.length and the parameters pointer.
9569  __ bind(&adaptor_frame);
9570  __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
9571  __ str(r1, MemOperand(sp, 0));
9572  __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
9573  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
9574  __ str(r3, MemOperand(sp, 1 * kPointerSize));
9575
9576  // Try the new space allocation. Start out with computing the size
9577  // of the arguments object and the elements array in words.
9578  Label add_arguments_object;
9579  __ bind(&try_allocate);
9580  __ cmp(r1, Operand(0));
9581  __ b(eq, &add_arguments_object);
9582  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
9583  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
9584  __ bind(&add_arguments_object);
9585  __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
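  // r1 is now the allocation size in words: the arguments object itself plus,
  // for a non-empty list, a FixedArray header and one word per element.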
9586
9587  // Do the allocation of both objects in one go.
9588  __ AllocateInNewSpace(
9589      r1,
9590      r0,
9591      r2,
9592      r3,
9593      &runtime,
9594      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
9595
9596  // Get the arguments boilerplate from the current (global) context.
9597  int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
9598  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
9599  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
9600  __ ldr(r4, MemOperand(r4, offset));
9601
9602  // Copy the JS object part.
9603  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
9604    __ ldr(r3, FieldMemOperand(r4, i));
9605    __ str(r3, FieldMemOperand(r0, i));
9606  }
9607
9608  // Setup the callee in-object property.
9609  ASSERT(Heap::arguments_callee_index == 0);
9610  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
9611  __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
9612
9613  // Get the length (smi tagged) and set that as an in-object property too.
9614  ASSERT(Heap::arguments_length_index == 1);
9615  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
9616  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
9617
9618  // If there are no actual arguments, we're done.
9619  Label done;
9620  __ cmp(r1, Operand(0));
9621  __ b(eq, &done);
9622
9623  // Get the parameters pointer from the stack.
9624  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
9625
9626  // Setup the elements pointer in the allocated arguments object and
9627  // initialize the header in the elements fixed array.
9628  __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
9629  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
9630  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
9631  __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
9632  __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
9633  __ mov(r1, Operand(r1, LSR, kSmiTagSize));  // Untag the length for the loop.
9634
9635  // Copy the fixed array slots.
9636  Label loop;
9637  // Setup r4 to point to the first array slot.
9638  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
9639  __ bind(&loop);
9640  // Pre-decrement r2 with kPointerSize on each iteration.
9641  // Pre-decrement in order to skip receiver.
9642  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
9643  // Post-increment r4 with kPointerSize on each iteration.
9644  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
9645  __ sub(r1, r1, Operand(1));
9646  __ cmp(r1, Operand(0));
9647  __ b(ne, &loop);
9648
9649  // Return and remove the on-stack parameters.
9650  __ bind(&done);
9651  __ add(sp, sp, Operand(3 * kPointerSize));
9652  __ Ret();
9653
9654  // Do the runtime call to allocate the arguments object.
9655  __ bind(&runtime);
9656  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
9657}
9658
9659
9660void RegExpExecStub::Generate(MacroAssembler* masm) {
9661  // Just jump directly to runtime if native RegExp is not selected at compile
9662  // time or if regexp entry in generated code is turned off runtime switch or
9663  // at compilation.
9664#ifdef V8_INTERPRETED_REGEXP
9665  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
9666#else  // V8_INTERPRETED_REGEXP
9667  if (!FLAG_regexp_entry_native) {
9668    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
9669    return;
9670  }
9671
9672  // Stack frame on entry.
9673  //  sp[0]: last_match_info (expected JSArray)
9674  //  sp[4]: previous index
9675  //  sp[8]: subject string
9676  //  sp[12]: JSRegExp object
9677
9678  static const int kLastMatchInfoOffset = 0 * kPointerSize;
9679  static const int kPreviousIndexOffset = 1 * kPointerSize;
9680  static const int kSubjectOffset = 2 * kPointerSize;
9681  static const int kJSRegExpOffset = 3 * kPointerSize;
9682
9683  Label runtime, invoke_regexp;
9684
9685  // Allocation of registers for this function. These are in callee save
9686  // registers and will be preserved by the call to the native RegExp code, as
9687  // this code is called using the normal C calling convention. When calling
9688  // directly from generated code the native RegExp code will not do a GC and
9689  // therefore the content of these registers are safe to use after the call.
9690  Register subject = r4;
9691  Register regexp_data = r5;
9692  Register last_match_info_elements = r6;
9693
9694  // Ensure that a RegExp stack is allocated.
9695  ExternalReference address_of_regexp_stack_memory_address =
9696      ExternalReference::address_of_regexp_stack_memory_address();
9697  ExternalReference address_of_regexp_stack_memory_size =
9698      ExternalReference::address_of_regexp_stack_memory_size();
9699  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
9700  __ ldr(r0, MemOperand(r0, 0));
9701  __ tst(r0, Operand(r0));
9702  __ b(eq, &runtime);
9703
9704  // Check that the first argument is a JSRegExp object.
9705  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
9706  ASSERT_EQ(0, kSmiTag);
9707  __ tst(r0, Operand(kSmiTagMask));
9708  __ b(eq, &runtime);
9709  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
9710  __ b(ne, &runtime);
9711
9712  // Check that the RegExp has been compiled (data contains a fixed array).
9713  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
9714  if (FLAG_debug_code) {
9715    __ tst(regexp_data, Operand(kSmiTagMask));
9716    __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
9717    __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
9718    __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
9719  }
9720
9721  // regexp_data: RegExp data (FixedArray)
9722  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
9723  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
9724  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
9725  __ b(ne, &runtime);
9726
9727  // regexp_data: RegExp data (FixedArray)
9728  // Check that the number of captures fit in the static offsets vector buffer.
9729  __ ldr(r2,
9730         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
9731  // Calculate number of capture registers (number_of_captures + 1) * 2. This
9732  // uses the assumption that smis are 2 * their untagged value.
9733  ASSERT_EQ(0, kSmiTag);
9734  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
9735  __ add(r2, r2, Operand(2));  // r2 was a smi.
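  // The smi in r2 already equals 2 * number_of_captures, so adding 2 yields
  // (number_of_captures + 1) * 2 without any explicit untagging.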
9736  // Check that the static offsets vector buffer is large enough.
9737  __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
9738  __ b(hi, &runtime);
9739
9740  // r2: Number of capture registers
9741  // regexp_data: RegExp data (FixedArray)
9742  // Check that the second argument is a string.
9743  __ ldr(subject, MemOperand(sp, kSubjectOffset));
9744  __ tst(subject, Operand(kSmiTagMask));
9745  __ b(eq, &runtime);
9746  Condition is_string = masm->IsObjectStringType(subject, r0);
9747  __ b(NegateCondition(is_string), &runtime);
9748  // Get the length of the string to r3.
9749  __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
9750
9751  // r2: Number of capture registers
9752  // r3: Length of subject string as a smi
9753  // subject: Subject string
9754  // regexp_data: RegExp data (FixedArray)
9755  // Check that the third argument is a positive smi less than the subject
9756  // string length. A negative value will be greater (unsigned comparison).
9757  __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
9758  __ tst(r0, Operand(kSmiTagMask));
9759  __ b(ne, &runtime);
9760  __ cmp(r3, Operand(r0));
9761  __ b(ls, &runtime);
9762
9763  // r2: Number of capture registers
9764  // subject: Subject string
9765  // regexp_data: RegExp data (FixedArray)
9766  // Check that the fourth object is a JSArray object.
9767  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
9768  __ tst(r0, Operand(kSmiTagMask));
9769  __ b(eq, &runtime);
9770  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
9771  __ b(ne, &runtime);
9772  // Check that the JSArray is in fast case.
9773  __ ldr(last_match_info_elements,
9774         FieldMemOperand(r0, JSArray::kElementsOffset));
9775  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
9776  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
9777  __ cmp(r0, ip);
9778  __ b(ne, &runtime);
9779  // Check that the last match info has space for the capture registers and the
9780  // additional information.
9781  __ ldr(r0,
9782         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
9783  __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
9784  __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
9785  __ b(gt, &runtime);
9786
9787  // subject: Subject string
9788  // regexp_data: RegExp data (FixedArray)
9789  // Check the representation and encoding of the subject string.
9790  Label seq_string;
9791  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
9792  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
9793  // First check for flat string.
9794  __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
9795  ASSERT_EQ(0, kStringTag | kSeqStringTag);
9796  __ b(eq, &seq_string);
9797
9798  // subject: Subject string
9799  // regexp_data: RegExp data (FixedArray)
9800  // Check for flat cons string.
9801  // A flat cons string is a cons string where the second part is the empty
9802  // string. In that case the subject string is just the first part of the cons
9803  // string. Also in this case the first part of the cons string is known to be
9804  // a sequential string or an external string.
9805  ASSERT(kExternalStringTag != 0);
9806  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
9807  __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
9808  __ b(ne, &runtime);
9809  __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
9810  __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
9811  __ cmp(r0, r1);
9812  __ b(ne, &runtime);
9813  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
9814  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
9815  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
9816  // Is first part a flat string?
9817  ASSERT_EQ(0, kSeqStringTag);
9818  __ tst(r0, Operand(kStringRepresentationMask));
9819  __ b(nz, &runtime);
9820
9821  __ bind(&seq_string);
9822  // subject: Subject string
9823  // regexp_data: RegExp data (FixedArray)
9824  // r0: Instance type of subject string
9825  ASSERT_EQ(4, kAsciiStringTag);
9826  ASSERT_EQ(0, kTwoByteStringTag);
9827  // Find the code object based on the assumptions above.
9828  __ and_(r0, r0, Operand(kStringEncodingMask));
9829  __ mov(r3, Operand(r0, ASR, 2), SetCC);
9830  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
9831  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
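  // Given the ASSERTs above (ASCII tag 4, two-byte tag 0), the ASR by 2
  // leaves 1 in r3 for ASCII strings and 0 for two-byte strings, and the
  // SetCC flags let the two conditional loads pick the matching code object.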
9832
9833  // Check that the irregexp code has been generated for the actual string
9834  // encoding. If it has, the field contains a code object; otherwise it
9835  // contains the hole.
9836  __ CompareObjectType(r7, r0, r0, CODE_TYPE);
9837  __ b(ne, &runtime);
9838
9839  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
9840  // r7: code
9841  // subject: Subject string
9842  // regexp_data: RegExp data (FixedArray)
9843  // Load used arguments before starting to push arguments for call to native
9844  // RegExp code to avoid handling changing stack height.
9845  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
9846  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
9847
9848  // r1: previous index
9849  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
9850  // r7: code
9851  // subject: Subject string
9852  // regexp_data: RegExp data (FixedArray)
9853  // All checks done. Now push arguments for native regexp code.
9854  __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
9855
9856  static const int kRegExpExecuteArguments = 7;
9857  __ push(lr);
9858  __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
9859
9860  // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
9861  __ mov(r0, Operand(1));
9862  __ str(r0, MemOperand(sp, 2 * kPointerSize));
9863
9864  // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
9865  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
9866  __ ldr(r0, MemOperand(r0, 0));
9867  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
9868  __ ldr(r2, MemOperand(r2, 0));
9869  __ add(r0, r0, Operand(r2));
9870  __ str(r0, MemOperand(sp, 1 * kPointerSize));
9871
9872  // Argument 5 (sp[0]): static offsets vector buffer.
9873  __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
9874  __ str(r0, MemOperand(sp, 0 * kPointerSize));
9875
9876  // For arguments 4 and 3, get the string length, calculate the start of the
9877  // string data and the shift of the index (0 for ASCII and 1 for two byte).
9878  __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
9879  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
9880  ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize);
9881  __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9882  __ eor(r3, r3, Operand(1));
9883  // Argument 4 (r3): End of string data
9884  // Argument 3 (r2): Start of string data
9885  __ add(r2, r9, Operand(r1, LSL, r3));
9886  __ add(r3, r9, Operand(r0, LSL, r3));
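  // The eor turned r3 into the per-character shift: 0 for ASCII (one byte per
  // character) and 1 for two byte, so index << r3 converts character indices
  // into byte offsets from the string data base in r9.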
9887
9888  // Argument 2 (r1): Previous index.
9889  // Already there
9890
9891  // Argument 1 (r0): Subject string.
9892  __ mov(r0, subject);
9893
9894  // Locate the code entry and call it.
9895  __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
9896  __ CallCFunction(r7, kRegExpExecuteArguments);
9897  __ pop(lr);
9898
9899  // r0: result
9900  // subject: subject string (callee saved)
9901  // regexp_data: RegExp data (callee saved)
9902  // last_match_info_elements: Last match info elements (callee saved)
9903
9904  // Check the result.
9905  Label success;
9906  __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
9907  __ b(eq, &success);
9908  Label failure;
9909  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
9910  __ b(eq, &failure);
9911  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
9912  // If not an exception, it can only be retry. Handle that in the runtime system.
9913  __ b(ne, &runtime);
9914  // The result must now be exception. If there is no pending exception, a
9915  // stack overflow (on the backtrack stack) was detected in RegExp code but
9916  // the exception has not been created yet. Handle that in the runtime system.
9917  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
9918  __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
9919  __ ldr(r0, MemOperand(r0, 0));
9920  __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
9921  __ ldr(r1, MemOperand(r1, 0));
9922  __ cmp(r0, r1);
9923  __ b(eq, &runtime);
9924  __ bind(&failure);
9925  // For failure and exception return null.
9926  __ mov(r0, Operand(Factory::null_value()));
9927  __ add(sp, sp, Operand(4 * kPointerSize));
9928  __ Ret();
9929
9930  // Process the result from the native regexp code.
9931  __ bind(&success);
9932  __ ldr(r1,
9933         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
9934  // Calculate number of capture registers (number_of_captures + 1) * 2.
9935  ASSERT_EQ(0, kSmiTag);
9936  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
9937  __ add(r1, r1, Operand(2));  // r1 was a smi.
9938
9939  // r1: number of capture registers
9940  // r4: subject string
9941  // Store the capture count.
9942  __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize));  // To smi.
9943  __ str(r2, FieldMemOperand(last_match_info_elements,
9944                             RegExpImpl::kLastCaptureCountOffset));
9945  // Store last subject and last input.
9946  __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
9947  __ str(subject,
9948         FieldMemOperand(last_match_info_elements,
9949                         RegExpImpl::kLastSubjectOffset));
9950  __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
9951  __ str(subject,
9952         FieldMemOperand(last_match_info_elements,
9953                         RegExpImpl::kLastInputOffset));
9954  __ mov(r3, last_match_info_elements);
9955  __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
9956
9957  // Get the static offsets vector filled by the native regexp code.
9958  ExternalReference address_of_static_offsets_vector =
9959      ExternalReference::address_of_static_offsets_vector();
9960  __ mov(r2, Operand(address_of_static_offsets_vector));
9961
9962  // r1: number of capture registers
9963  // r2: offsets vector
9964  Label next_capture, done;
9965  // Capture register counter starts from number of capture registers and
9966  // counts down until wrapping after zero.
9967  __ add(r0,
9968         last_match_info_elements,
9969         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
9970  __ bind(&next_capture);
9971  __ sub(r1, r1, Operand(1), SetCC);
9972  __ b(mi, &done);
9973  // Read the value from the static offsets vector buffer.
9974  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
9975  // Store the smi value in the last match info.
9976  __ mov(r3, Operand(r3, LSL, kSmiTagSize));
9977  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
9978  __ jmp(&next_capture);
9979  __ bind(&done);
9980
9981  // Return last match info.
9982  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
9983  __ add(sp, sp, Operand(4 * kPointerSize));
9984  __ Ret();
9985
9986  // Do the runtime call to execute the regexp.
9987  __ bind(&runtime);
9988  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
9989#endif  // V8_INTERPRETED_REGEXP
9990}
9991
9992
9993void CallFunctionStub::Generate(MacroAssembler* masm) {
9994  Label slow;
9995
9996  // If the receiver might be a value (string, number or boolean) check for this
9997  // and box it if it is.
9998  if (ReceiverMightBeValue()) {
9999    // Get the receiver from the stack.
10000    // function, receiver [, arguments]
10001    Label receiver_is_value, receiver_is_js_object;
10002    __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
10003
10004    // Check if receiver is a smi (which is a number value).
10005    __ BranchOnSmi(r1, &receiver_is_value);
10006
10007    // Check if the receiver is a valid JS object.
10008    __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
10009    __ b(ge, &receiver_is_js_object);
10010
10011    // Call the runtime to box the value.
10012    __ bind(&receiver_is_value);
10013    __ EnterInternalFrame();
10014    __ push(r1);
10015    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
10016    __ LeaveInternalFrame();
10017    __ str(r0, MemOperand(sp, argc_ * kPointerSize));
10018
10019    __ bind(&receiver_is_js_object);
10020  }
10021
10022  // Get the function to call from the stack.
10023  // function, receiver [, arguments]
10024  __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
10025
10026  // Check that the function is really a JavaScript function.
10027  // r1: pushed function (to be verified)
10028  __ BranchOnSmi(r1, &slow);
10029  // Get the map of the function object.
10030  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
10031  __ b(ne, &slow);
10032
10033  // Fast-case: Invoke the function now.
10034  // r1: pushed function
10035  ParameterCount actual(argc_);
10036  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
10037
10038  // Slow-case: Non-function called.
10039  __ bind(&slow);
10040  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
10041  // of the original receiver from the call site).
10042  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
10043  __ mov(r0, Operand(argc_));  // Setup the number of arguments.
10044  __ mov(r2, Operand(0));
10045  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
10046  __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
10047          RelocInfo::CODE_TARGET);
10048}
10049
10050
10051// Unfortunately you have to run without snapshots to see most of these
10052// names in the profile since most compare stubs end up in the snapshot.
10053const char* CompareStub::GetName() {
10054  if (name_ != NULL) return name_;
10055  const int kMaxNameLength = 100;
10056  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
10057  if (name_ == NULL) return "OOM";
10058
10059  const char* cc_name;
10060  switch (cc_) {
10061    case lt: cc_name = "LT"; break;
10062    case gt: cc_name = "GT"; break;
10063    case le: cc_name = "LE"; break;
10064    case ge: cc_name = "GE"; break;
10065    case eq: cc_name = "EQ"; break;
10066    case ne: cc_name = "NE"; break;
10067    default: cc_name = "UnknownCondition"; break;
10068  }
10069
10070  const char* strict_name = "";
10071  if (strict_ && (cc_ == eq || cc_ == ne)) {
10072    strict_name = "_STRICT";
10073  }
10074
10075  const char* never_nan_nan_name = "";
10076  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
10077    never_nan_nan_name = "_NO_NAN";
10078  }
10079
10080  const char* include_number_compare_name = "";
10081  if (!include_number_compare_) {
10082    include_number_compare_name = "_NO_NUMBER";
10083  }
10084
10085  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
10086               "CompareStub_%s%s%s%s",
10087               cc_name,
10088               strict_name,
10089               never_nan_nan_name,
10090               include_number_compare_name);
10091  return name_;
10092}
10093
10094
10095int CompareStub::MinorKey() {
10096  // Encode the condition and the three parameters in a unique 16 bit value.
10097  // To avoid duplicate stubs, the never-NaN-NaN condition is only taken into
10098  // account if the condition is equals.
10099  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 13));
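  // The ARM condition field occupies the top four bits of an instruction
  // word, so shifting cc_ right by 28 compresses the condition into a small
  // integer for the key.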
10100  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
10101         | StrictField::encode(strict_)
10102         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
10103         | IncludeNumberCompareField::encode(include_number_compare_);
10104}
10105
10106
10107// StringCharCodeAtGenerator
10108
10109void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
10110  Label flat_string;
10111  Label ascii_string;
10112  Label got_char_code;
10113
10114  // If the receiver is a smi trigger the non-string case.
10115  __ BranchOnSmi(object_, receiver_not_string_);
10116
10117  // Fetch the instance type of the receiver into result register.
10118  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
10119  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
10120  // If the receiver is not a string trigger the non-string case.
10121  __ tst(result_, Operand(kIsNotStringMask));
10122  __ b(ne, receiver_not_string_);
10123
10124  // If the index is non-smi trigger the non-smi case.
10125  __ BranchOnNotSmi(index_, &index_not_smi_);
10126
10127  // Put smi-tagged index into scratch register.
10128  __ mov(scratch_, index_);
10129  __ bind(&got_smi_index_);
10130
10131  // Check for index out of range.
10132  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
10133  __ cmp(ip, Operand(scratch_));
10134  __ b(ls, index_out_of_range_);
10135
10136  // We need special handling for non-flat strings.
10137  ASSERT(kSeqStringTag == 0);
10138  __ tst(result_, Operand(kStringRepresentationMask));
10139  __ b(eq, &flat_string);
10140
10141  // Handle non-flat strings.
10142  __ tst(result_, Operand(kIsConsStringMask));
10143  __ b(eq, &call_runtime_);
10144
10145  // ConsString.
10146  // Check whether the right hand side is the empty string (i.e. if
10147  // this is really a flat string in a cons string). If that is not
10148  // the case we would rather go to the runtime system now to flatten
10149  // the string.
10150  __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
10151  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
10152  __ cmp(result_, Operand(ip));
10153  __ b(ne, &call_runtime_);
10154  // Get the first of the two strings and load its instance type.
10155  __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
10156  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
10157  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
10158  // If the first cons component is also non-flat, then go to runtime.
10159  ASSERT(kSeqStringTag == 0);
10160  __ tst(result_, Operand(kStringRepresentationMask));
10161  __ b(nz, &call_runtime_);
10162
10163  // Check for 1-byte or 2-byte string.
10164  __ bind(&flat_string);
10165  ASSERT(kAsciiStringTag != 0);
10166  __ tst(result_, Operand(kStringEncodingMask));
10167  __ b(nz, &ascii_string);
10168
10169  // 2-byte string.
10170  // Load the 2-byte character code into the result register. We can
10171  // add without shifting since the smi tag size is the log2 of the
10172  // number of bytes in a two-byte character.
10173  ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
10174  __ add(scratch_, object_, Operand(scratch_));
10175  __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
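  // For example, index 5 is the smi 10, and 10 bytes into the string data is
  // exactly 5 two-byte characters, so the tagged index doubles as the offset.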
10176  __ jmp(&got_char_code);
10177
10178  // ASCII string.
10179  // Load the byte into the result register.
10180  __ bind(&ascii_string);
10181  __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
10182  __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
10183
10184  __ bind(&got_char_code);
10185  __ mov(result_, Operand(result_, LSL, kSmiTagSize));
10186  __ bind(&exit_);
10187}
10188
10189
10190void StringCharCodeAtGenerator::GenerateSlow(
10191    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
10192  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
10193
10194  // Index is not a smi.
10195  __ bind(&index_not_smi_);
10196  // If index is a heap number, try converting it to an integer.
10197  __ CheckMap(index_,
10198              scratch_,
10199              Heap::kHeapNumberMapRootIndex,
10200              index_not_number_,
10201              true);
10202  call_helper.BeforeCall(masm);
10203  __ Push(object_, index_);
10204  __ push(index_);  // Consumed by runtime conversion function.
10205  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
10206    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
10207  } else {
10208    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
10209    // NumberToSmi discards numbers that are not exact integers.
10210    __ CallRuntime(Runtime::kNumberToSmi, 1);
10211  }
10212  if (!scratch_.is(r0)) {
10213    // Save the conversion result before the pop instructions below
10214    // have a chance to overwrite it.
10215    __ mov(scratch_, r0);
10216  }
10217  __ pop(index_);
10218  __ pop(object_);
10219  // Reload the instance type.
10220  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
10221  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
10222  call_helper.AfterCall(masm);
10223  // If index is still not a smi, it must be out of range.
10224  __ BranchOnNotSmi(scratch_, index_out_of_range_);
10225  // Otherwise, return to the fast path.
10226  __ jmp(&got_smi_index_);
10227
10228  // Call runtime. We get here when the receiver is a string and the
10229  // index is a number, but the code of getting the actual character
10230  // is too complex (e.g., when the string needs to be flattened).
10231  __ bind(&call_runtime_);
10232  call_helper.BeforeCall(masm);
10233  __ Push(object_, index_);
10234  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
10235  if (!result_.is(r0)) {
10236    __ mov(result_, r0);
10237  }
10238  call_helper.AfterCall(masm);
10239  __ jmp(&exit_);
10240
10241  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
10242}
10243
10244
10245// -------------------------------------------------------------------------
10246// StringCharFromCodeGenerator
10247
10248void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
10249  // Fast case of Heap::LookupSingleCharacterStringFromCode.
10250  ASSERT(kSmiTag == 0);
10251  ASSERT(kSmiShiftSize == 0);
10252  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
10253  __ tst(code_,
10254         Operand(kSmiTagMask |
10255                 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
10256  __ b(nz, &slow_case_);
10257
10258  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
10259  // At this point code register contains smi tagged ascii char code.
10260  ASSERT(kSmiTag == 0);
10261  __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
10262  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
10263  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
10264  __ cmp(result_, Operand(ip));
10265  __ b(eq, &slow_case_);
10266  __ bind(&exit_);
10267}
10268
10269
10270void StringCharFromCodeGenerator::GenerateSlow(
10271    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
10272  __ Abort("Unexpected fallthrough to CharFromCode slow case");
10273
10274  __ bind(&slow_case_);
10275  call_helper.BeforeCall(masm);
10276  __ push(code_);
10277  __ CallRuntime(Runtime::kCharFromCode, 1);
10278  if (!result_.is(r0)) {
10279    __ mov(result_, r0);
10280  }
10281  call_helper.AfterCall(masm);
10282  __ jmp(&exit_);
10283
10284  __ Abort("Unexpected fallthrough from CharFromCode slow case");
10285}
10286
10287
10288// -------------------------------------------------------------------------
10289// StringCharAtGenerator
10290
10291void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
10292  char_code_at_generator_.GenerateFast(masm);
10293  char_from_code_generator_.GenerateFast(masm);
10294}
10295
10296
10297void StringCharAtGenerator::GenerateSlow(
10298    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
10299  char_code_at_generator_.GenerateSlow(masm, call_helper);
10300  char_from_code_generator_.GenerateSlow(masm, call_helper);
10301}
10302
10303
10304void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
10305                                          Register dest,
10306                                          Register src,
10307                                          Register count,
10308                                          Register scratch,
10309                                          bool ascii) {
10310  Label loop;
10311  Label done;
10312  // This loop just copies one character at a time, as it is only used for very
10313  // short strings.
10314  if (!ascii) {
10315    __ add(count, count, Operand(count), SetCC);
10316  } else {
10317    __ cmp(count, Operand(0));
10318  }
10319  __ b(eq, &done);
10320
10321  __ bind(&loop);
10322  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
10323  // Perform sub between load and dependent store to get the load time to
10324  // complete.
10325  __ sub(count, count, Operand(1), SetCC);
10326  __ strb(scratch, MemOperand(dest, 1, PostIndex));
10327  // Loop back if this was not the last iteration.
10328  __ b(gt, &loop);
10329
10330  __ bind(&done);
10331}
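
// A minimal C sketch of the loop GenerateCopyCharacters emits (for two-byte
// strings count is doubled first, so the same byte loop covers both cases):
//
//   while (count > 0) {
//     *dest++ = *src++;
//     count--;
//   }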
10332
10333
10334enum CopyCharactersFlags {
10335  COPY_ASCII = 1,
10336  DEST_ALWAYS_ALIGNED = 2
10337};
10338
10339
10340void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
10341                                              Register dest,
10342                                              Register src,
10343                                              Register count,
10344                                              Register scratch1,
10345                                              Register scratch2,
10346                                              Register scratch3,
10347                                              Register scratch4,
10348                                              Register scratch5,
10349                                              int flags) {
10350  bool ascii = (flags & COPY_ASCII) != 0;
10351  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
10352
10353  if (dest_always_aligned && FLAG_debug_code) {
10354    // Check that destination is actually word aligned if the flag says
10355    // that it is.
10356    __ tst(dest, Operand(kPointerAlignmentMask));
10357    __ Check(eq, "Destination of copy not aligned.");
10358  }
10359
10360  const int kReadAlignment = 4;
10361  const int kReadAlignmentMask = kReadAlignment - 1;
10362  // Ensure that reading an entire aligned word containing the last character
10363  // of a string will not read outside the allocated area (because we pad up
10364  // to kObjectAlignment).
10365  ASSERT(kObjectAlignment >= kReadAlignment);
10366  // Assumes word reads and writes are little endian.
10367  // Nothing to do for zero characters.
10368  Label done;
10369  if (!ascii) {
10370    __ add(count, count, Operand(count), SetCC);
10371  } else {
10372    __ cmp(count, Operand(0));
10373  }
10374  __ b(eq, &done);
10375
10376  // Assume that you cannot read (or write) unaligned.
10377  Label byte_loop;
10378  // Must copy at least eight bytes, otherwise just do it one byte at a time.
10379  __ cmp(count, Operand(8));
10380  __ add(count, dest, Operand(count));
10381  Register limit = count;  // Copy until dest equals this.
10382  __ b(lt, &byte_loop);
10383
10384  if (!dest_always_aligned) {
10385    // Align dest by byte copying. Copies between zero and three bytes.
10386    __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
10387    Label dest_aligned;
10388    __ b(eq, &dest_aligned);
10389    __ cmp(scratch4, Operand(2));
10390    __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
10391    __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
10392    __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
10393    __ strb(scratch1, MemOperand(dest, 1, PostIndex));
10394    __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
10395    __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
10396    __ bind(&dest_aligned);
10397  }
10398
10399  Label simple_loop;
10400
10401  __ sub(scratch4, dest, Operand(src));
10402  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
10403  __ b(eq, &simple_loop);
10404  // The shift register holds the number of bits in a source word that
10405  // must be combined with bits from the next source word in order
10406  // to create a destination word.
10407
10408  // Complex loop for src/dst that are not aligned the same way.
10409  {
10410    Label loop;
10411    __ mov(scratch4, Operand(scratch4, LSL, 3));
10412    Register left_shift = scratch4;
10413    __ and_(src, src, Operand(~3));  // Round down to load previous word.
10414    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
10415    // Store the "shift" most significant bits of scratch in the least
10416    // significant bits (i.e., shift down by (32 - shift)).
10417    __ rsb(scratch2, left_shift, Operand(32));
10418    Register right_shift = scratch2;
10419    __ mov(scratch1, Operand(scratch1, LSR, right_shift));
10420
10421    __ bind(&loop);
10422    __ ldr(scratch3, MemOperand(src, 4, PostIndex));
10423    __ sub(scratch5, limit, Operand(dest));
10424    __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
10425    __ str(scratch1, MemOperand(dest, 4, PostIndex));
10426    __ mov(scratch1, Operand(scratch3, LSR, right_shift));
10427    // Loop if four or more bytes left to copy.
10428    // Compare to eight, because we did the subtract before increasing dst.
10429    __ sub(scratch5, scratch5, Operand(8), SetCC);
10430    __ b(ge, &loop);
10431  }
10432  // There are now between zero and three bytes left to copy (the negative of
10433  // that count is in scratch5), and between one and three bytes already read
10434  // into scratch1 (eight times that count is in scratch4). We may have read past
10435  // the end of the string, but because objects are aligned, we have not read
10436  // past the end of the object.
10437  // Find the minimum of remaining characters to move and preloaded characters
10438  // and write those as bytes.
10439  __ add(scratch5, scratch5, Operand(4), SetCC);
10440  __ b(eq, &done);
10441  __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
10442  // Move the minimum of bytes read and bytes left to copy to scratch5.
10443  __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
10444  // Between one and three (value in scratch5) characters already read into
10445  // scratch1, ready to write.
10446  __ cmp(scratch5, Operand(2));
10447  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
10448  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
10449  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
10450  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
10451  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
10452  // Copy any remaining bytes.
10453  __ b(&byte_loop);
10454
10455  // Simple loop.
10456  // Copy words from src to dst, until less than four bytes left.
10457  // Both src and dest are word aligned.
10458  __ bind(&simple_loop);
10459  {
10460    Label loop;
10461    __ bind(&loop);
10462    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
10463    __ sub(scratch3, limit, Operand(dest));
10464    __ str(scratch1, MemOperand(dest, 4, PostIndex));
10465    // Compare to 8, not 4, because we do the subtraction before increasing
10466    // dest.
10467    __ cmp(scratch3, Operand(8));
10468    __ b(ge, &loop);
10469  }
10470
10471  // Copy bytes from src to dst until dst hits limit.
10472  __ bind(&byte_loop);
10473  __ cmp(dest, Operand(limit));
10474  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
10475  __ b(ge, &done);
10476  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
10477  __ b(&byte_loop);
10478
10479  __ bind(&done);
10480}
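
// A C sketch of the misaligned-word loop above (illustrative only). With
// left_shift = ((dest - src) & 3) * 8 and right_shift = 32 - left_shift,
// each destination word is assembled from two consecutive source words:
//
//   uint32_t carry = *src++ >> right_shift;
//   while (bytes_left >= 4) {
//     uint32_t next = *src++;
//     *dest++ = carry | (next << left_shift);
//     carry = next >> right_shift;
//     bytes_left -= 4;
//   }
//   // Up to three carried bytes are then stored one at a time.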
10481
10482
10483void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
10484                                                        Register c1,
10485                                                        Register c2,
10486                                                        Register scratch1,
10487                                                        Register scratch2,
10488                                                        Register scratch3,
10489                                                        Register scratch4,
10490                                                        Register scratch5,
10491                                                        Label* not_found) {
10492  // Register scratch3 is the general scratch register in this function.
10493  Register scratch = scratch3;
10494
10495  // Make sure that both characters are not digits, because such strings have
10496  // a different hash algorithm. Don't try to look for these in the symbol table.
10497  Label not_array_index;
10498  __ sub(scratch, c1, Operand(static_cast<int>('0')));
10499  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
10500  __ b(hi, &not_array_index);
10501  __ sub(scratch, c2, Operand(static_cast<int>('0')));
10502  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
10503
10504  // If the check failed, combine both characters into a single halfword.
10505  // This is required by the contract of the method: code at the
10506  // not_found branch expects this combination in the c1 register.
10507  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
10508  __ b(ls, not_found);
10509
10510  __ bind(&not_array_index);
10511  // Calculate the two character string hash.
10512  Register hash = scratch1;
10513  StringHelper::GenerateHashInit(masm, hash, c1);
10514  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
10515  StringHelper::GenerateHashGetHash(masm, hash);
10516
10517  // Collect the two characters in a register.
10518  Register chars = c1;
10519  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
10520
10521  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
10522  // hash:  hash of two character string.
10523
10524  // Load the symbol table.
10526  Register symbol_table = c2;
10527  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
10528
10529  // Load the undefined value.
10530  Register undefined = scratch4;
10531  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
10532
10533  // Calculate capacity mask from the symbol table capacity.
10534  Register mask = scratch2;
10535  __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
10536  __ mov(mask, Operand(mask, ASR, 1));
10537  __ sub(mask, mask, Operand(1));
10538
10539  // Calculate untagged address of the first element of the symbol table.
10540  Register first_symbol_table_element = symbol_table;
10541  __ add(first_symbol_table_element, symbol_table,
10542         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
10543
10544  // Registers
10545  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
10546  // hash:  hash of two character string
10547  // mask:  capacity mask
10548  // first_symbol_table_element: address of the first element of
10549  //                             the symbol table
10550  // scratch: -
10551
10552  // Perform a number of probes in the symbol table.
10553  static const int kProbes = 4;
10554  Label found_in_symbol_table;
10555  Label next_probe[kProbes];
10556  for (int i = 0; i < kProbes; i++) {
10557    Register candidate = scratch5;  // Scratch register contains candidate.
10558
10559    // Calculate entry in symbol table.
10560    if (i > 0) {
10561      __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
10562    } else {
10563      __ mov(candidate, hash);
10564    }
10565
10566    __ and_(candidate, candidate, Operand(mask));
10567
10568    // Load the entry from the symbol table.
10569    ASSERT_EQ(1, SymbolTable::kEntrySize);
10570    __ ldr(candidate,
10571           MemOperand(first_symbol_table_element,
10572                      candidate,
10573                      LSL,
10574                      kPointerSizeLog2));
10575
10576    // If entry is undefined no string with this hash can be found.
10577    __ cmp(candidate, undefined);
10578    __ b(eq, not_found);
10579
10580    // If length is not 2 the string is not a candidate.
10581    __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
10582    __ cmp(scratch, Operand(Smi::FromInt(2)));
10583    __ b(ne, &next_probe[i]);
10584
10585    // Check that the candidate is a non-external ascii string.
10586    __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
10587    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
10588    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
10589                                              &next_probe[i]);
10590
10591    // Check if the two characters match.
10592    // Assumes that word load is little endian.
10593    __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
10594    __ cmp(chars, scratch);
10595    __ b(eq, &found_in_symbol_table);
10596    __ bind(&next_probe[i]);
10597  }
10598
10599  // No matching 2 character string found by probing.
10600  __ jmp(not_found);
10601
10602  // Scratch register contains result when we fall through to here.
10603  Register result = scratch;
10604  __ bind(&found_in_symbol_table);
10605  __ Move(r0, result);
10606}
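
// The probe sequence above corresponds roughly to this C sketch (helper
// names are illustrative, not V8 API):
//
//   for (int i = 0; i < kProbes; i++) {
//     Object* candidate = table[(hash + probe_offset(i)) & mask];
//     if (candidate == undefined) return not_found;
//     if (IsSeqAsciiStringOfLengthTwo(candidate) &&
//         FirstTwoBytes(candidate) == chars) return candidate;  // found
//   }
//   return not_found;  // Give up after kProbes attempts.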
10607
10608
10609void StringHelper::GenerateHashInit(MacroAssembler* masm,
10610                                    Register hash,
10611                                    Register character) {
10612  // hash = character + (character << 10);
10613  __ add(hash, character, Operand(character, LSL, 10));
10614  // hash ^= hash >> 6;
10615  __ eor(hash, hash, Operand(hash, ASR, 6));
10616}
10617
10618
10619void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
10620                                            Register hash,
10621                                            Register character) {
10622  // hash += character;
10623  __ add(hash, hash, Operand(character));
10624  // hash += hash << 10;
10625  __ add(hash, hash, Operand(hash, LSL, 10));
10626  // hash ^= hash >> 6;
10627  __ eor(hash, hash, Operand(hash, ASR, 6));
10628}
10629
10630
10631void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
10632                                       Register hash) {
10633  // hash += hash << 3;
10634  __ add(hash, hash, Operand(hash, LSL, 3));
10635  // hash ^= hash >> 11;
10636  __ eor(hash, hash, Operand(hash, ASR, 11));
10637  // hash += hash << 15;
10638  __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
10639
10640  // if (hash == 0) hash = 27;
10641  __ mov(hash, Operand(27), LeaveCC, eq);
10642}
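
// Taken together, the three hash helpers above compute the following running
// hash (a plain C sketch; note the stubs use ASR, i.e. signed shifts):
//
//   int32_t hash = c0 + (c0 << 10);
//   hash ^= hash >> 6;
//   // ...for each further character c:
//   hash += c;  hash += hash << 10;  hash ^= hash >> 6;
//   // ...finalization:
//   hash += hash << 3;  hash ^= hash >> 11;  hash += hash << 15;
//   if (hash == 0) hash = 27;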
10643
10644
10645void SubStringStub::Generate(MacroAssembler* masm) {
10646  Label runtime;
10647
10648  // Stack frame on entry.
10649  //  lr: return address
10650  //  sp[0]: to
10651  //  sp[4]: from
10652  //  sp[8]: string
10653
10654  // This stub is called from the native-call %_SubString(...), so
10655  // nothing can be assumed about the arguments. We therefore check that:
10656  //  "string" is a sequential string,
10657  //  both "from" and "to" are smis, and
10658  //  0 <= from <= to <= string.length.
10659  // If any of these assumptions fail, we call the runtime system.
10660
10661  static const int kToOffset = 0 * kPointerSize;
10662  static const int kFromOffset = 1 * kPointerSize;
10663  static const int kStringOffset = 2 * kPointerSize;
10664
10665
10666  // Check bounds and smi-ness.
10667  __ ldr(r7, MemOperand(sp, kToOffset));
10668  __ ldr(r6, MemOperand(sp, kFromOffset));
10669  ASSERT_EQ(0, kSmiTag);
10670  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
10671  // I.e., arithmetic shift right by one un-smi-tags.
10672  __ mov(r2, Operand(r7, ASR, 1), SetCC);
10673  __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
10674  // If either r7 (to) or r6 (from) had the smi tag bit set, then carry is set now.
10675  __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
10676  __ b(mi, &runtime);  // From is negative.
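  // A sketch of the predicated sequence above:
  //
  //   r2 = r7 >> 1; C = bit0(r7);                    // SetCC
  //   if (C == 0) { r3 = r6 >> 1; C = bit0(r6); N = sign(r3); }
  //   if (C) goto runtime;   // "to" or "from" was not a smi.
  //   if (N) goto runtime;   // "from" is negative.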
10677
10678  __ sub(r2, r2, Operand(r3), SetCC);
10679  __ b(mi, &runtime);  // Fail if from > to.
10680  // Special handling of sub-strings of length 1 and 2. One character strings
10681  // are handled in the runtime system (looked up in the single character
10682  // cache). Two character strings are looked for in the symbol cache.
10683  __ cmp(r2, Operand(2));
10684  __ b(lt, &runtime);
10685
10686  // r2: length
10687  // r3: from index (untagged smi)
10688  // r6: from (smi)
10689  // r7: to (smi)
10690
10691  // Make sure first argument is a sequential (or flat) string.
10692  __ ldr(r5, MemOperand(sp, kStringOffset));
10693  ASSERT_EQ(0, kSmiTag);
10694  __ tst(r5, Operand(kSmiTagMask));
10695  __ b(eq, &runtime);
10696  Condition is_string = masm->IsObjectStringType(r5, r1);
10697  __ b(NegateCondition(is_string), &runtime);
10698
10699  // r1: instance type
10700  // r2: length
10701  // r3: from index (untagged smi)
10702  // r5: string
10703  // r6: from (smi)
10704  // r7: to (smi)
10705  Label seq_string;
10706  __ and_(r4, r1, Operand(kStringRepresentationMask));
10707  ASSERT(kSeqStringTag < kConsStringTag);
10708  ASSERT(kExternalStringTag > kConsStringTag);
10709  __ cmp(r4, Operand(kConsStringTag));
10710  __ b(gt, &runtime);  // External strings go to runtime.
10711  __ b(lt, &seq_string);  // Sequential strings are handled directly.
10712
10713  // Cons string. Try to recurse (once) on the first substring.
10714  // (This adds a little more generality than necessary to handle flattened
10715  // cons strings, but not much).
10716  __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
10717  __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
10718  __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
10719  __ tst(r1, Operand(kStringRepresentationMask));
10720  ASSERT_EQ(0, kSeqStringTag);
10721  __ b(ne, &runtime);  // Cons and External strings go to runtime.
10722
10723  // Definitely a sequential string.
10724  __ bind(&seq_string);
10725
10726  // r1: instance type.
10727  // r2: length
10728  // r3: from index (untagged smi)
10729  // r5: string
10730  // r6: from (smi)
10731  // r7: to (smi)
10732  __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
10733  __ cmp(r4, Operand(r7));
10734  __ b(lt, &runtime);  // Fail if to > length.
10735
10736  // r1: instance type.
10737  // r2: result string length.
10738  // r3: from index (untagged smi)
10739  // r5: string.
10740  // r6: from offset (smi)
10741  // Check for flat ascii string.
10742  Label non_ascii_flat;
10743  __ tst(r1, Operand(kStringEncodingMask));
10744  ASSERT_EQ(0, kTwoByteStringTag);
10745  __ b(eq, &non_ascii_flat);
10746
10747  Label result_longer_than_two;
10748  __ cmp(r2, Operand(2));
10749  __ b(gt, &result_longer_than_two);
10750
10751  // Sub string of length 2 requested.
10752  // Get the two characters forming the sub string.
10753  __ add(r5, r5, Operand(r3));
10754  __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
10755  __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
10756
10757  // Try to look up the two-character string in the symbol table.
10758  Label make_two_character_string;
10759  StringHelper::GenerateTwoCharacterSymbolTableProbe(
10760      masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
10761  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
10762  __ add(sp, sp, Operand(3 * kPointerSize));
10763  __ Ret();
10764
10765  // r2: result string length.
10766  // r3: two characters combined into halfword in little endian byte order.
10767  __ bind(&make_two_character_string);
10768  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
10769  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
10770  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
10771  __ add(sp, sp, Operand(3 * kPointerSize));
10772  __ Ret();
10773
10774  __ bind(&result_longer_than_two);
10775
10776  // Allocate the result.
10777  __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
10778
10779  // r0: result string.
10780  // r2: result string length.
10781  // r5: string.
10782  // r6: from offset (smi)
10783  // Locate first character of result.
10784  __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
10785  // Locate 'from' character of string.
10786  __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
10787  __ add(r5, r5, Operand(r6, ASR, 1));
10788
10789  // r0: result string.
10790  // r1: first character of result string.
10791  // r2: result string length.
10792  // r5: first character of sub string to copy.
10793  ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
10794  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
10795                                           COPY_ASCII | DEST_ALWAYS_ALIGNED);
10796  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
10797  __ add(sp, sp, Operand(3 * kPointerSize));
10798  __ Ret();
10799
10800  __ bind(&non_ascii_flat);
10801  // r2: result string length.
10802  // r5: string.
10803  // r6: from offset (smi)
10804  // Handle the flat two-byte string case.
10805
10806  // Allocate the result.
10807  __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
10808
10809  // r0: result string.
10810  // r2: result string length.
10811  // r5: string.
10812  // Locate first character of result.
10813  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
10814  // Locate 'from' character of string.
10815  __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
10816  // As "from" is a smi it is 2 times the untagged value, which matches the
10817  // size of a two-byte character.
10818  __ add(r5, r5, Operand(r6));
10819
10820  // r0: result string.
10821  // r1: first character of result.
10822  // r2: result length.
10823  // r5: first character of string to copy.
10824  ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
10825  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
10826                                           DEST_ALWAYS_ALIGNED);
10827  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
10828  __ add(sp, sp, Operand(3 * kPointerSize));
10829  __ Ret();
10830
10831  // Just jump to runtime to create the sub string.
10832  __ bind(&runtime);
10833  __ TailCallRuntime(Runtime::kSubString, 3, 1);
10834}
10835
10836
10837void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
10838                                                        Register left,
10839                                                        Register right,
10840                                                        Register scratch1,
10841                                                        Register scratch2,
10842                                                        Register scratch3,
10843                                                        Register scratch4) {
10844  Label compare_lengths;
10845  // Find minimum length and length difference.
10846  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
10847  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
10848  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
10849  Register length_delta = scratch3;
10850  __ mov(scratch1, scratch2, LeaveCC, gt);
10851  Register min_length = scratch1;
10852  ASSERT(kSmiTag == 0);
10853  __ tst(min_length, Operand(min_length));
10854  __ b(eq, &compare_lengths);
10855
10856  // Untag smi.
10857  __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
10858
10859  // Set up registers so that we only need to increment one register
10860  // in the loop.
10861  __ add(scratch2, min_length,
10862         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
10863  __ add(left, left, Operand(scratch2));
10864  __ add(right, right, Operand(scratch2));
10865  // Registers left and right now point just past the first min_length characters.
10866  __ rsb(min_length, min_length, Operand(-1));
10867  Register index = min_length;
10868  // Index starts at -min_length.
10869
10870  {
10871    // Compare loop.
10872    Label loop;
10873    __ bind(&loop);
10874    // Compare characters.
10875    __ add(index, index, Operand(1), SetCC);
10876    __ ldrb(scratch2, MemOperand(left, index), ne);
10877    __ ldrb(scratch4, MemOperand(right, index), ne);
10878    // Skip to compare lengths with eq condition true.
10879    __ b(eq, &compare_lengths);
10880    __ cmp(scratch2, scratch4);
10881    __ b(eq, &loop);
10882    // Fallthrough with eq condition false.
10883  }
10884  // Compare lengths: strings are equal up to the first min_length characters.
10885  __ bind(&compare_lengths);
10886  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
10887  // Use zero length_delta as result.
10888  __ mov(r0, Operand(length_delta), SetCC, eq);
10889  // Fall through to here if characters compare not-equal.
10890  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
10891  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
10892  __ Ret();
10893}
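
// A C sketch of the comparison loop above (illustrative only). Both string
// pointers are advanced just past the first min_length characters, and one
// negative index walks both, so the loop increments a single register:
//
//   const char* lp = left + min_length;
//   const char* rp = right + min_length;
//   for (int i = -min_length; i != 0; i++) {
//     if (lp[i] != rp[i]) return lp[i] < rp[i] ? LESS : GREATER;
//   }
//   return sign_of(left_length - right_length);  // EQUAL if lengths match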
10894
10895
10896void StringCompareStub::Generate(MacroAssembler* masm) {
10897  Label runtime;
10898
10899  // Stack frame on entry.
10900  //  sp[0]: right string
10901  //  sp[4]: left string
10902  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // left
10903  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // right
10904
10905  Label not_same;
10906  __ cmp(r0, r1);
10907  __ b(ne, &not_same);
10908  ASSERT_EQ(0, EQUAL);
10909  ASSERT_EQ(0, kSmiTag);
10910  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
10911  __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
10912  __ add(sp, sp, Operand(2 * kPointerSize));
10913  __ Ret();
10914
10915  __ bind(&not_same);
10916
10917  // Check that both objects are sequential ascii strings.
10918  __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
10919
10920  // Compare flat ascii strings natively. Remove arguments from stack first.
10921  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
10922  __ add(sp, sp, Operand(2 * kPointerSize));
10923  GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
10924
10925  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
10926  // tagged as a small integer.
10927  __ bind(&runtime);
10928  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
10929}
10930
10931
10932void StringAddStub::Generate(MacroAssembler* masm) {
10933  Label string_add_runtime;
10934  // Stack on entry:
10935  // sp[0]: second argument.
10936  // sp[4]: first argument.
10937
10938  // Load the two arguments.
10939  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
10940  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
10941
10942  // Make sure that both arguments are strings if not known in advance.
10943  if (string_check_) {
10944    ASSERT_EQ(0, kSmiTag);
10945    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
10946    // Load instance types.
10947    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
10948    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
10949    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
10950    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
10951    ASSERT_EQ(0, kStringTag);
10952    // If either is not a string, go to runtime.
10953    __ tst(r4, Operand(kIsNotStringMask));
10954    __ tst(r5, Operand(kIsNotStringMask), eq);
10955    __ b(ne, &string_add_runtime);
10956  }
10957
10958  // Both arguments are strings.
10959  // r0: first string
10960  // r1: second string
10961  // r4: first string instance type (if string_check_)
10962  // r5: second string instance type (if string_check_)
10963  {
10964    Label strings_not_empty;
10965    // Check if either of the strings is empty. In that case return the other.
10966    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
10967    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
10968    ASSERT(kSmiTag == 0);
10969    __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
10970    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
10971    ASSERT(kSmiTag == 0);
10972    // Else test if second string is empty.
10973    __ cmp(r3, Operand(Smi::FromInt(0)), ne);
10974    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
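    // Equivalent sketch of the predicated sequence above:
    //
    //   if (len1 == 0) return second;      // r0 already holds second
    //   else if (len2 == 0) return first;  // r0 still holds first
    //   else goto strings_not_empty;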
10975
10976    __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
10977    __ add(sp, sp, Operand(2 * kPointerSize));
10978    __ Ret();
10979
10980    __ bind(&strings_not_empty);
10981  }
10982
10983  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
10984  __ mov(r3, Operand(r3, ASR, kSmiTagSize));
10985  // Both strings are non-empty.
10986  // r0: first string
10987  // r1: second string
10988  // r2: length of first string
10989  // r3: length of second string
10990  // r4: first string instance type (if string_check_)
10991  // r5: second string instance type (if string_check_)
10992  // Look at the length of the result of adding the two strings.
10993  Label string_add_flat_result, longer_than_two;
10994  // Adding two lengths can't overflow.
10995  ASSERT(String::kMaxLength * 2 > String::kMaxLength);
10996  __ add(r6, r2, Operand(r3));
10997  // Handle the addition of two one-character strings specially: probe the
10998  // symbol table for the combined string first instead of allocating one.
10999  __ cmp(r6, Operand(2));
11000  __ b(ne, &longer_than_two);
11001
11002  // Check that both strings are non-external ascii strings.
11003  if (!string_check_) {
11004    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
11005    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
11006    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
11007    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
11008  }
11009  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
11010                                                  &string_add_runtime);
11011
11012  // Get the two characters forming the new string.
11013  __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
11014  __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
11015
11016  // Try to look up the two-character string in the symbol table. If it is
11017  // not found, just allocate a new one.
11018  Label make_two_character_string;
11019  StringHelper::GenerateTwoCharacterSymbolTableProbe(
11020      masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
11021  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
11022  __ add(sp, sp, Operand(2 * kPointerSize));
11023  __ Ret();
11024
11025  __ bind(&make_two_character_string);
11026  // The resulting string has length 2, and the first characters of the two
11027  // strings are combined into a single halfword in register r2.
11028  // So we can fill the resulting string with a single halfword store
11029  // instruction instead of two loops (this assumes the processor is in
11030  // little-endian mode).
11031  __ mov(r6, Operand(2));
11032  __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
11033  __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
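  // On a little-endian processor the strh above amounts to this sketch
  // (illustrative names, not V8 API):
  //
  //   *reinterpret_cast<uint16_t*>(result_chars) = char1 | (char2 << 8);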
11034  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
11035  __ add(sp, sp, Operand(2 * kPointerSize));
11036  __ Ret();
11037
11038  __ bind(&longer_than_two);
11039  // Check if resulting string will be flat.
11040  __ cmp(r6, Operand(String::kMinNonFlatLength));
11041  __ b(lt, &string_add_flat_result);
11042  // Handle exceptionally long strings in the runtime system.
11043  ASSERT((String::kMaxLength & 0x80000000) == 0);
11044  ASSERT(IsPowerOf2(String::kMaxLength + 1));
11045  // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
11046  __ cmp(r6, Operand(String::kMaxLength + 1));
11047  __ b(hs, &string_add_runtime);
11048
11049  // If result is not supposed to be flat, allocate a cons string object.
11050  // If both strings are ascii the result is an ascii cons string.
11051  if (!string_check_) {
11052    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
11053    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
11054    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
11055    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
11056  }
11057  Label non_ascii, allocated, ascii_data;
11058  ASSERT_EQ(0, kTwoByteStringTag);
11059  __ tst(r4, Operand(kStringEncodingMask));
11060  __ tst(r5, Operand(kStringEncodingMask), ne);
11061  __ b(eq, &non_ascii);
11062
11063  // Allocate an ASCII cons string.
11064  __ bind(&ascii_data);
11065  __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
11066  __ bind(&allocated);
11067  // Fill the fields of the cons string.
11068  __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
11069  __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
11070  __ mov(r0, Operand(r7));
11071  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
11072  __ add(sp, sp, Operand(2 * kPointerSize));
11073  __ Ret();
11074
11075  __ bind(&non_ascii);
11076  // At least one of the strings is two-byte. Check whether it happens
11077  // to contain only ascii characters.
11078  // r4: first instance type.
11079  // r5: second instance type.
11080  __ tst(r4, Operand(kAsciiDataHintMask));
11081  __ tst(r5, Operand(kAsciiDataHintMask), ne);
11082  __ b(ne, &ascii_data);
11083  __ eor(r4, r4, Operand(r5));
11084  ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
11085  __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
11086  __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
11087  __ b(eq, &ascii_data);
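  // The eor/and/cmp triple implements this C sketch (t1 and t2 are the two
  // instance types; names illustrative):
  //
  //   if (((t1 ^ t2) & (kAsciiStringTag | kAsciiDataHintTag)) ==
  //       (kAsciiStringTag | kAsciiDataHintTag)) {
  //     // One string is ascii and the other is two-byte but hinted to hold
  //     // only ascii data, so an ascii cons string still works.
  //     goto ascii_data;
  //   }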
11088
11089  // Allocate a two byte cons string.
11090  __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
11091  __ jmp(&allocated);
11092
11093  // Handle creating a flat result. First check that both strings are
11094  // sequential and that they have the same encoding.
11095  // r0: first string
11096  // r1: second string
11097  // r2: length of first string
11098  // r3: length of second string
11099  // r4: first string instance type (if string_check_)
11100  // r5: second string instance type (if string_check_)
11101  // r6: sum of lengths.
11102  __ bind(&string_add_flat_result);
11103  if (!string_check_) {
11104    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
11105    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
11106    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
11107    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
11108  }
11109  // Check that both strings are sequential.
11110  ASSERT_EQ(0, kSeqStringTag);
11111  __ tst(r4, Operand(kStringRepresentationMask));
11112  __ tst(r5, Operand(kStringRepresentationMask), eq);
11113  __ b(ne, &string_add_runtime);
11114  // Now check if both strings have the same encoding (ASCII/Two-byte).
11115  // r0: first string.
11116  // r1: second string.
11117  // r2: length of first string.
11118  // r3: length of second string.
11119  // r6: sum of lengths.
11120  Label non_ascii_string_add_flat_result;
11121  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
11122  __ eor(r7, r4, Operand(r5));
11123  __ tst(r7, Operand(kStringEncodingMask));
11124  __ b(ne, &string_add_runtime);
11125  // And see if it's ASCII or two-byte.
11126  __ tst(r4, Operand(kStringEncodingMask));
11127  __ b(eq, &non_ascii_string_add_flat_result);
11128
11129  // Both strings are sequential ASCII strings. We also know that they are
11130  // short (since the sum of the lengths is less than kMinNonFlatLength).
11131  // r6: length of resulting flat string
11132  __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
11133  // Locate first character of result.
11134  __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
11135  // Locate first character of first argument.
11136  __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
11137  // r0: first character of first string.
11138  // r1: second string.
11139  // r2: length of first string.
11140  // r3: length of second string.
11141  // r6: first character of result.
11142  // r7: result string.
11143  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
11144
11145  // Load second argument and locate first character.
11146  __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
11147  // r1: first character of second string.
11148  // r3: length of second string.
11149  // r6: next character of result.
11150  // r7: result string.
11151  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
11152  __ mov(r0, Operand(r7));
11153  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
11154  __ add(sp, sp, Operand(2 * kPointerSize));
11155  __ Ret();
11156
11157  __ bind(&non_ascii_string_add_flat_result);
11158  // Both strings are sequential two byte strings.
11159  // r0: first string.
11160  // r1: second string.
11161  // r2: length of first string.
11162  // r3: length of second string.
11163  // r6: sum of length of strings.
11164  __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
11165  // r0: first string.
11166  // r1: second string.
11167  // r2: length of first string.
11168  // r3: length of second string.
11169  // r7: result string.
11170
11171  // Locate first character of result.
11172  __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
11173  // Locate first character of first argument.
11174  __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
11175
11176  // r0: first character of first string.
11177  // r1: second string.
11178  // r2: length of first string.
11179  // r3: length of second string.
11180  // r6: first character of result.
11181  // r7: result string.
11182  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
11183
11184  // Locate first character of second argument.
11185  __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
11186
11187  // r1: first character of second string.
11188  // r3: length of second string.
11189  // r6: next character of result (after copy of first string).
11190  // r7: result string.
11191  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
11192
11193  __ mov(r0, Operand(r7));
11194  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
11195  __ add(sp, sp, Operand(2 * kPointerSize));
11196  __ Ret();
11197
11198  // Just jump to runtime to add the two strings.
11199  __ bind(&string_add_runtime);
11200  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
11201}
11202
11203
11204#undef __
11205
11206} }  // namespace v8::internal
11207
11208#endif  // V8_TARGET_ARCH_ARM
11209