codegen-arm.cc revision 9dcf7e2f83591d471e88bf7d230651900b8e424b
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "jsregexp.h"
#include "jump-target-light-inl.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
#include "virtual-frame-arm-inl.h"

namespace v8 {
namespace internal {


static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
static void MultiplyByKnownInt(MacroAssembler* masm,
                               Register source,
                               Register destination,
                               int known_int);
static bool IsEasyToMultiplyBy(int x);
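// (The definitions of the two helpers above appear later in the file and
// are not part of this excerpt. As a rough sketch of the intent: assuming
// the usual shift-and-add decomposition, IsEasyToMultiplyBy(x) accepts
// constants for which MultiplyByKnownInt can expand the multiplication into
// a couple of shifts and adds, e.g. source * 10 ==
// (source << 3) + (source << 1).)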


#define __ ACCESS_MASM(masm_)

// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

void DeferredCode::SaveRegisters() {
  // On ARM you either have a completely spilled frame or you
  // handle it yourself, but at the moment there's no automation
  // of registers and deferred code.
}


void DeferredCode::RestoreRegisters() {
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  frame_state_->frame()->AssertIsSpilled();
}


void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
}


void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterInternalFrame();
}


void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveInternalFrame();
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      previous_(owner->state()) {
  owner->set_state(this);
}


ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
                                             JumpTarget* true_target,
                                             JumpTarget* false_target)
    : CodeGenState(owner),
      true_target_(true_target),
      false_target_(false_target) {
  owner->set_state(this);
}


TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
                                           Slot* slot,
                                           TypeInfo type_info)
    : CodeGenState(owner),
      slot_(slot) {
  owner->set_state(this);
  old_type_info_ = owner->set_type_info(slot, type_info);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


TypeInfoCodeGenState::~TypeInfoCodeGenState() {
  owner()->set_type_info(slot_, old_type_info_);
}

// -------------------------------------------------------------------------
// CodeGenerator implementation

CodeGenerator::CodeGenerator(MacroAssembler* masm)
    : deferred_(8),
      masm_(masm),
      info_(NULL),
      frame_(NULL),
      allocator_(NULL),
      cc_reg_(al),
      state_(NULL),
      loop_nesting_(0),
      type_info_(NULL),
      function_return_is_shadowed_(false) {
}


// Calling conventions:
// fp: caller's frame pointer
// sp: stack pointer
// r1: called JS function
// cp: callee's context

void CodeGenerator::Generate(CompilationInfo* info) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(info->function());
  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");

  // Initialize state.
  info_ = info;

  int slots = scope()->num_parameters() + scope()->num_stack_slots();
  ScopedVector<TypeInfo> type_info_array(slots);
  type_info_ = &type_info_array;

  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  cc_reg_ = al;

  // Adjust for function-level loop nesting.
  ASSERT_EQ(0, loop_nesting_);
  loop_nesting_ = info->loop_nesting();

  {
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments
    // lr: return address
    // fp: caller's frame pointer
    // sp: stack pointer
    // r1: called JS function
    // cp: callee's context
    allocator_->Initialize();

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ stop("stop-at");
    }
#endif

    if (info->mode() == CompilationInfo::PRIMARY) {
      frame_->Enter();
      // tos: code slot

      // Allocate space for locals and initialize them.  This also checks
      // for stack overflow.
      frame_->AllocateStackSlots();

      VirtualFrame::SpilledScope spilled_scope(frame_);
      int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
      if (heap_slots > 0) {
        // Allocate local context.
        // Get outer context and create a new context based on it.
        __ ldr(r0, frame_->Function());
        frame_->EmitPush(r0);
        if (heap_slots <= FastNewContextStub::kMaximumSlots) {
          FastNewContextStub stub(heap_slots);
          frame_->CallStub(&stub, 1);
        } else {
          frame_->CallRuntime(Runtime::kNewContext, 1);
        }

#ifdef DEBUG
        JumpTarget verified_true;
        __ cmp(r0, cp);
        verified_true.Branch(eq);
        __ stop("NewContext: r0 is expected to be the same as cp");
        verified_true.Bind();
#endif
        // Update context local.
        __ str(cp, frame_->Context());
      }

      // TODO(1241774): Improve this code:
      // 1) only needed if we have a context
      // 2) no need to recompute context ptr every single time
      // 3) don't copy parameter operand code from SlotOperand!
      {
        Comment cmnt2(masm_, "[ copy context parameters into .context");
        // Note that iteration order is relevant here! If we have the same
        // parameter twice (e.g., function (x, y, x)), and that parameter
        // needs to be copied into the context, the value that must end up
        // in the context is that of the last matching argument. This is a
        // rare case so we don't check for it; instead we rely on the
        // copying order: such a parameter is copied repeatedly into the
        // same context location and thus the last value is what is seen
        // inside the function.
        for (int i = 0; i < scope()->num_parameters(); i++) {
          Variable* par = scope()->parameter(i);
          Slot* slot = par->slot();
          if (slot != NULL && slot->type() == Slot::CONTEXT) {
            ASSERT(!scope()->is_global_scope());  // No params in global scope.
            __ ldr(r1, frame_->ParameterAt(i));
            // Loads r2 with context; used below in RecordWrite.
            __ str(r1, SlotOperand(slot, r2));
            // Load the offset into r3.
            int slot_offset =
                FixedArray::kHeaderSize + slot->index() * kPointerSize;
            __ RecordWrite(r2, Operand(slot_offset), r3, r1);
          }
        }
      }

      // Store the arguments object.  This must happen after context
      // initialization because the arguments object may be stored in
      // the context.
      if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
        StoreArgumentsObject(true);
      }

      // Initialize ThisFunction reference if present.
      if (scope()->is_function_scope() && scope()->function() != NULL) {
        __ mov(ip, Operand(Factory::the_hole_value()));
        frame_->EmitPush(ip);
        StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
      }
    } else {
      // When used as the secondary compiler for splitting, r1, cp,
      // fp, and lr have been pushed on the stack.  Adjust the virtual
      // frame to match this state.
      frame_->Adjust(4);

      // Bind all the bailout labels to the beginning of the function.
      List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
      for (int i = 0; i < bailouts->length(); i++) {
        __ bind(bailouts->at(i)->label());
      }
    }

    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.SetExpectedHeight();
    function_return_is_shadowed_ = false;

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope()->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope()->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatements(info->function()->body());
    }
  }

  // Handle the return from the function.
  if (has_valid_frame()) {
    // If there is a valid frame, control flow can fall off the end of
    // the body.  In that case there is an implicit return statement.
    ASSERT(!function_return_is_shadowed_);
    frame_->PrepareForReturn();
    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
    if (function_return_.is_bound()) {
      function_return_.Jump();
    } else {
      function_return_.Bind();
      GenerateReturnSequence();
    }
  } else if (function_return_.is_linked()) {
    // If the return target has dangling jumps to it, then we have not
    // yet generated the return sequence.  This can happen when (a)
    // control does not flow off the end of the body so we did not
    // compile an artificial return statement just above, and (b) there
    // are return statements in the body but (c) they are all shadowed.
    function_return_.Bind();
    GenerateReturnSequence();
  }

  // Adjust for function-level loop nesting.
  ASSERT(loop_nesting_ == info->loop_nesting());
  loop_nesting_ = 0;

  // Code generation state must be reset.
  ASSERT(!has_cc());
  ASSERT(state_ == NULL);
  ASSERT(loop_nesting() == 0);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    ProcessDeferred();
  }

  allocator_ = NULL;
  type_info_ = NULL;
}


int CodeGenerator::NumberOfSlot(Slot* slot) {
  if (slot == NULL) return kInvalidSlotNumber;
  switch (slot->type()) {
    case Slot::PARAMETER:
      return slot->index();
    case Slot::LOCAL:
      return slot->index() + scope()->num_parameters();
    default:
      break;
  }
  return kInvalidSlotNumber;
}


MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(cp));  // do not overwrite context register
      Register context = cp;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now. Get the function context.
      // (In fact this load may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we are
      // always at a function context. However it is safe to dereference
      // because the function context of a function context is itself.
      // Before deleting this load we should try to create a counter-example
      // first, though...)
      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      UNREACHABLE();
      return MemOperand(r0, 0);
  }
}


MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Register tmp,
    Register tmp2,
    JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = cp;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
        __ tst(tmp2, tmp2);
        slow->Branch(ne);
      }
      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
      context = tmp;
    }
  }
  // Check that last extension is NULL.
  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
  __ tst(tmp2, tmp2);
  slow->Branch(ne);
  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(tmp, slot->index());
}


// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the
// condition code register and no value is pushed. If the condition code
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
                                  JumpTarget* true_target,
                                  JumpTarget* false_target,
                                  bool force_cc) {
  ASSERT(!has_cc());
  int original_height = frame_->height();

  { ConditionCodeGenState new_state(this, true_target, false_target);
    Visit(x);

    // If we hit a stack overflow, we may not have actually visited
    // the expression.  In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (eg, a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        has_valid_frame() &&
        !has_cc() &&
        frame_->height() == original_height) {
      frame_->SpillAll();
      true_target->Jump();
    }
  }
  if (force_cc && frame_ != NULL && !has_cc()) {
    // Convert the TOS value to a boolean in the condition code register.
    ToBoolean(true_target, false_target);
  }
  ASSERT(!force_cc || !has_valid_frame() || has_cc());
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}


void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  JumpTarget true_target;
  JumpTarget false_target;
  LoadCondition(expr, &true_target, &false_target, false);

  if (has_cc()) {
    // Convert cc_reg_ into a boolean value.
    VirtualFrame::SpilledScope scope(frame_);
    JumpTarget loaded;
    JumpTarget materialize_true;
    materialize_true.Branch(cc_reg_);
    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Jump();
    materialize_true.Bind();
    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
    frame_->EmitPush(r0);
    loaded.Bind();
    cc_reg_ = al;
  }

  if (true_target.is_linked() || false_target.is_linked()) {
    VirtualFrame::SpilledScope scope(frame_);
    // We have at least one condition value that has been "translated"
    // into a branch, thus it needs to be loaded explicitly.
    JumpTarget loaded;
    if (frame_ != NULL) {
      loaded.Jump();  // Don't lose the current TOS.
    }
    bool both = true_target.is_linked() && false_target.is_linked();
    // Load "true" if necessary.
    if (true_target.is_linked()) {
      true_target.Bind();
      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
      frame_->EmitPush(r0);
    }
    // If both "true" and "false" need to be loaded jump across the code for
    // "false".
    if (both) {
      loaded.Jump();
    }
    // Load "false" if necessary.
    if (false_target.is_linked()) {
      false_target.Bind();
      __ LoadRoot(r0, Heap::kFalseValueRootIndex);
      frame_->EmitPush(r0);
    }
    // A value is loaded on all paths reaching this point.
    loaded.Bind();
  }
  ASSERT(has_valid_frame());
  ASSERT(!has_cc());
  ASSERT_EQ(original_height + 1, frame_->height());
}


void CodeGenerator::LoadGlobal() {
  Register reg = frame_->GetTOSRegister();
  __ ldr(reg, GlobalObject());
  frame_->EmitPush(reg);
}


void CodeGenerator::LoadGlobalReceiver(Register scratch) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(scratch,
         FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
  frame_->EmitPush(scratch);
}


ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
  ASSERT(scope()->arguments_shadow() != NULL);
  // We don't want to do lazy arguments allocation for functions that
  // have heap-allocated contexts, because it interferes with the
  // uninitialized const tracking in the context objects.
  return (scope()->num_heap_slots() > 0)
      ? EAGER_ARGUMENTS_ALLOCATION
      : LAZY_ARGUMENTS_ALLOCATION;
}


void CodeGenerator::StoreArgumentsObject(bool initial) {
  VirtualFrame::SpilledScope spilled_scope(frame_);

  ArgumentsAllocationMode mode = ArgumentsMode();
  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);

  Comment cmnt(masm_, "[ store arguments object");
  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
    // When using lazy arguments allocation, we store the hole value
    // as a sentinel indicating that the arguments object hasn't been
    // allocated yet.
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    frame_->EmitPush(ip);
  } else {
    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
    __ ldr(r2, frame_->Function());
    // The receiver is below the arguments, the return address, and the
    // frame pointer on the stack.
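    // (Assuming the standard JS frame layout here: fp[0] holds the caller's
    // fp and fp[1] the return address, with the parameters and then the
    // receiver above that, so the receiver sits at
    // fp + (2 + num_parameters) * kPointerSize.)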
    const int kReceiverDisplacement = 2 + scope()->num_parameters();
    __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
    __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
    frame_->Adjust(3);
    __ Push(r2, r1, r0);
    frame_->CallStub(&stub, 3);
    frame_->EmitPush(r0);
  }

  Variable* arguments = scope()->arguments()->var();
  Variable* shadow = scope()->arguments_shadow()->var();
  ASSERT(arguments != NULL && arguments->slot() != NULL);
  ASSERT(shadow != NULL && shadow->slot() != NULL);
  JumpTarget done;
  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
    // We have to skip storing into the arguments slot if it has
    // already been written to. This can happen if a function
    // has a local variable named 'arguments'.
    LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
    frame_->EmitPop(r0);
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(r0, ip);
    done.Branch(ne);
  }
  StoreToSlot(arguments->slot(), NOT_CONST_INIT);
  if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
  StoreToSlot(shadow->slot(), NOT_CONST_INIT);
}


void CodeGenerator::LoadTypeofExpression(Expression* expr) {
  // Special handling of identifiers as subexpressions of typeof.
  Variable* variable = expr->AsVariableProxy()->AsVariable();
  if (variable != NULL && !variable->is_this() && variable->is_global()) {
    // For a global variable we build the property reference
    // <global>.<variable> and perform a (regular non-contextual) property
    // load to make sure we do not get reference errors.
    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
    Literal key(variable->name());
    Property property(&global, &key, RelocInfo::kNoPosition);
    Reference ref(this, &property);
    ref.GetValue();
  } else if (variable != NULL && variable->slot() != NULL) {
    // For a variable that rewrites to a slot, we signal it is the immediate
    // subexpression of a typeof.
    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
  } else {
    // Anything else can be handled normally.
    Load(expr);
  }
}


Reference::Reference(CodeGenerator* cgen,
                     Expression* expression,
                     bool persist_after_get)
    : cgen_(cgen),
      expression_(expression),
      type_(ILLEGAL),
      persist_after_get_(persist_after_get) {
  cgen->LoadReference(this);
}


Reference::~Reference() {
  ASSERT(is_unloaded() || is_illegal());
}


void CodeGenerator::LoadReference(Reference* ref) {
  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    Load(property->obj());
    if (property->key()->IsPropertyName()) {
      ref->set_type(Reference::NAMED);
    } else {
      Load(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property.  Global variables are treated as named property references.
    if (var->is_global()) {
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->slot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    Load(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }
}


void CodeGenerator::UnloadReference(Reference* ref) {
  int size = ref->size();
  ref->set_unloaded();
  if (size == 0) return;

  // Pop a reference from the stack while preserving TOS.
  VirtualFrame::RegisterAllocationScope scope(this);
  Comment cmnt(masm_, "[ UnloadReference");
  if (size > 0) {
    Register tos = frame_->PopToRegister();
    frame_->Drop(size);
    frame_->EmitPush(tos);
  }
}


// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
void CodeGenerator::ToBoolean(JumpTarget* true_target,
                              JumpTarget* false_target) {
  VirtualFrame::SpilledScope spilled_scope(frame_);
  // Note: The generated code snippet does not change stack variables.
  //       Only the condition code should be set.
  frame_->EmitPop(r0);

  // Fast case checks

  // Check if the value is 'false'.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is 'true'.
  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
  __ cmp(r0, ip);
  true_target->Branch(eq);

  // Check if the value is 'undefined'.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  false_target->Branch(eq);

  // Check if the value is a smi.
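  // (A smi is encoded as its payload shifted left by one with a zero tag
  // bit, so Smi::FromInt(0) is simply the machine word 0: smi zero converts
  // to false, every other smi to true.)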
  __ cmp(r0, Operand(Smi::FromInt(0)));
  false_target->Branch(eq);
  __ tst(r0, Operand(kSmiTagMask));
  true_target->Branch(eq);

  // Slow case: call the runtime.
  frame_->EmitPush(r0);
  frame_->CallRuntime(Runtime::kToBool, 1);
  // Convert the result (r0) to a condition code.
  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
  __ cmp(r0, ip);

  cc_reg_ = ne;
}


void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                           OverwriteMode overwrite_mode,
                                           GenerateInlineSmi inline_smi,
                                           int constant_rhs) {
  // top of virtual frame: y
  // 2nd elt. on virtual frame: x
  // result: top of virtual frame

  // Stub is entered with a call: 'return address' is in lr.
  switch (op) {
    case Token::ADD:
    case Token::SUB:
      if (inline_smi) {
        JumpTarget done;
        Register rhs = frame_->PopToRegister();
        Register lhs = frame_->PopToRegister(rhs);
        Register scratch = VirtualFrame::scratch0();
        __ orr(scratch, rhs, Operand(lhs));
        // Check they are both small and positive.
        __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
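        // (ORing the operands and testing kSmiTagMask | 0xc0000000 rejects,
        // in one instruction, any non-smi and any smi with payload outside
        // [0, 2^29), so the conditional add/sub below cannot overflow the
        // smi range.)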
        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
        ASSERT_EQ(0, kSmiTag);
        if (op == Token::ADD) {
          __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
        } else {
          __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
        }
        done.Branch(eq);
        GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
        frame_->SpillAll();
        frame_->CallStub(&stub, 0);
        done.Bind();
        frame_->EmitPush(r0);
        break;
      } else {
        // Fall through!
      }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
      if (inline_smi) {
        bool rhs_is_smi = frame_->KnownSmiAt(0);
        bool lhs_is_smi = frame_->KnownSmiAt(1);
        Register rhs = frame_->PopToRegister();
        Register lhs = frame_->PopToRegister(rhs);
        Register smi_test_reg;
        Condition cond;
        if (!rhs_is_smi || !lhs_is_smi) {
          if (rhs_is_smi) {
            smi_test_reg = lhs;
          } else if (lhs_is_smi) {
            smi_test_reg = rhs;
          } else {
            smi_test_reg = VirtualFrame::scratch0();
            __ orr(smi_test_reg, rhs, Operand(lhs));
          }
          // Check they are both Smis.
          __ tst(smi_test_reg, Operand(kSmiTagMask));
          cond = eq;
        } else {
          cond = al;
        }
        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
        if (op == Token::BIT_OR) {
          __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
        } else if (op == Token::BIT_AND) {
          __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
        } else {
          ASSERT(op == Token::BIT_XOR);
          ASSERT_EQ(0, kSmiTag);
          __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
        }
        if (cond != al) {
          JumpTarget done;
          done.Branch(cond);
          GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
          frame_->SpillAll();
          frame_->CallStub(&stub, 0);
          done.Bind();
        }
        frame_->EmitPush(r0);
        break;
      } else {
        // Fall through!
      }
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      Register rhs = frame_->PopToRegister();
      Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
      GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
      frame_->SpillAll();
      frame_->CallStub(&stub, 0);
      frame_->EmitPush(r0);
      break;
    }

    case Token::COMMA: {
      Register scratch = frame_->PopToRegister();
      // Simply discard left value.
      frame_->Drop();
      frame_->EmitPush(scratch);
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }
}


class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             int value,
                             bool reversed,
                             OverwriteMode overwrite_mode,
                             Register tos)
      : op_(op),
        value_(value),
        reversed_(reversed),
        overwrite_mode_(overwrite_mode),
        tos_register_(tos) {
    set_comment("[ DeferredInlinedSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  int value_;
  bool reversed_;
  OverwriteMode overwrite_mode_;
  Register tos_register_;
};


void DeferredInlineSmiOperation::Generate() {
  Register lhs = r1;
  Register rhs = r0;
  switch (op_) {
    case Token::ADD: {
      // Revert optimistic add.
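      // (The inline code has already executed tos = tos + value and then
      // branched here on overflow or on a non-smi operand, so subtracting
      // value first recovers the original operand before calling the stub.)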
      if (reversed_) {
        __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    case Token::SUB: {
      // Revert optimistic sub.
      if (reversed_) {
        __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r1, Operand(Smi::FromInt(value_)));
      } else {
        __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
        __ mov(r0, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    // For these operations there is no optimistic operation that needs to be
    // reverted.
    case Token::MUL:
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      if (reversed_) {
        if (tos_register_.is(r0)) {
          __ mov(r1, Operand(Smi::FromInt(value_)));
        } else {
          ASSERT(tos_register_.is(r1));
          __ mov(r0, Operand(Smi::FromInt(value_)));
          lhs = r0;
          rhs = r1;
        }
      } else {
        if (tos_register_.is(r1)) {
          __ mov(r0, Operand(Smi::FromInt(value_)));
        } else {
          ASSERT(tos_register_.is(r0));
          __ mov(r1, Operand(Smi::FromInt(value_)));
          lhs = r0;
          rhs = r1;
        }
      }
      break;
    }

    case Token::SHL:
    case Token::SHR:
    case Token::SAR: {
      if (!reversed_) {
        if (tos_register_.is(r1)) {
          __ mov(r0, Operand(Smi::FromInt(value_)));
        } else {
          ASSERT(tos_register_.is(r0));
          __ mov(r1, Operand(Smi::FromInt(value_)));
          lhs = r0;
          rhs = r1;
        }
      } else {
        ASSERT(op_ == Token::SHL);
        __ mov(r1, Operand(Smi::FromInt(value_)));
      }
      break;
    }

    default:
      // Other cases should have been handled before this point.
      UNREACHABLE();
      break;
  }

  GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
  __ CallStub(&stub);
  // The generic stub returns its value in r0, but that's not
  // necessarily what we want.  We want whatever the inlined code
  // expected, which is that the answer is in the same register as
  // the operand was.
  __ Move(tos_register_, r0);
}


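// Returns true if x has at most two bits set.  For example, for
// x == 0xa (binary 1010) the first x &= x - 1 clears the lowest set bit,
// leaving 0x8, and the second x & (x - 1) then yields 0.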
static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}


// Returns the index of the lowest bit set.
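// (For example, BitPosition(0x30) is 4.  The loops require x != 0.)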
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}


void CodeGenerator::SmiOperation(Token::Value op,
                                 Handle<Object> value,
                                 bool reversed,
                                 OverwriteMode mode) {
  int int_value = Smi::cast(*value)->value();

  bool both_sides_are_smi = frame_->KnownSmiAt(0);

  bool something_to_inline;
  switch (op) {
    case Token::ADD:
    case Token::SUB:
    case Token::BIT_AND:
    case Token::BIT_OR:
    case Token::BIT_XOR: {
      something_to_inline = true;
      break;
    }
    case Token::SHL: {
      something_to_inline = (both_sides_are_smi || !reversed);
      break;
    }
    case Token::SHR:
    case Token::SAR: {
      if (reversed) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MOD: {
      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    case Token::MUL: {
      if (!IsEasyToMultiplyBy(int_value)) {
        something_to_inline = false;
      } else {
        something_to_inline = true;
      }
      break;
    }
    default: {
      something_to_inline = false;
      break;
    }
  }

  if (!something_to_inline) {
    if (!reversed) {
      // Push the rhs onto the virtual frame by putting it in a TOS register.
      Register rhs = frame_->GetTOSRegister();
      __ mov(rhs, Operand(value));
      frame_->EmitPush(rhs, TypeInfo::Smi());
      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
    } else {
      // Pop the rhs, then push lhs and rhs in the right order.  Only performs
      // at most one pop, the rest takes place in TOS registers.
      Register lhs = frame_->GetTOSRegister();    // Get reg for pushing.
      Register rhs = frame_->PopToRegister(lhs);  // Don't use lhs for this.
      __ mov(lhs, Operand(value));
      frame_->EmitPush(lhs, TypeInfo::Smi());
      TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
      frame_->EmitPush(rhs, t);
      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, kUnknownIntValue);
    }
    return;
  }

  // We move the top of stack to a register (normally no move is involved).
  Register tos = frame_->PopToRegister();
  // All other registers are spilled.  The deferred code expects one argument
  // in a register and all other values are flushed to the stack.  The
  // answer is returned in the same register that the top of stack argument
  // was in.
  frame_->SpillAll();

  switch (op) {
    case Token::ADD: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      __ add(tos, tos, Operand(value), SetCC);
      deferred->Branch(vs);
      if (!both_sides_are_smi) {
        __ tst(tos, Operand(kSmiTagMask));
        deferred->Branch(ne);
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    case Token::SUB: {
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);

      if (reversed) {
        __ rsb(tos, tos, Operand(value), SetCC);
      } else {
        __ sub(tos, tos, Operand(value), SetCC);
      }
      deferred->Branch(vs);
      if (!both_sides_are_smi) {
        __ tst(tos, Operand(kSmiTagMask));
        deferred->Branch(ne);
      }
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }


    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      if (both_sides_are_smi) {
        switch (op) {
          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
          default: UNREACHABLE();
        }
        frame_->EmitPush(tos, TypeInfo::Smi());
      } else {
        DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
        __ tst(tos, Operand(kSmiTagMask));
        deferred->Branch(ne);
        switch (op) {
          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
          default: UNREACHABLE();
        }
        deferred->BindExit();
        TypeInfo result_type =
            (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
        frame_->EmitPush(tos, result_type);
      }
      break;
    }

    case Token::SHL:
      if (reversed) {
        ASSERT(both_sides_are_smi);
        int max_shift = 0;
        int max_result = int_value == 0 ? 1 : int_value;
        while (Smi::IsValid(max_result << 1)) {
          max_shift++;
          max_result <<= 1;
        }
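        // (For int_value == 3 this loop stops at max_shift == 28, since
        // 3 << 29 no longer fits in the smi range; shift amounts of
        // max_shift or more are sent to the deferred code below.)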
        DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
        // Mask off the last 5 bits of the shift operand (rhs).  This is part
        // of the definition of shift in JS and we know we have a Smi so we
        // can safely do this.  The masked version gets passed to the
        // deferred code, but that makes no difference.
        __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
        __ cmp(tos, Operand(Smi::FromInt(max_shift)));
        deferred->Branch(ge);
        Register scratch = VirtualFrame::scratch0();
        __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Untag.
        __ mov(tos, Operand(Smi::FromInt(int_value)));    // Load constant.
        __ mov(tos, Operand(tos, LSL, scratch));          // Shift constant.
        deferred->BindExit();
        TypeInfo result = TypeInfo::Integer32();
        frame_->EmitPush(tos, result);
        break;
      }
      // Fall through!
    case Token::SHR:
    case Token::SAR: {
      ASSERT(!reversed);
      TypeInfo result = TypeInfo::Integer32();
      Register scratch = VirtualFrame::scratch0();
      Register scratch2 = VirtualFrame::scratch1();
      int shift_value = int_value & 0x1f;  // least significant 5 bits
      DeferredCode* deferred =
        new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
      uint32_t problematic_mask = kSmiTagMask;
      // For unsigned shift by zero all negative smis are problematic.
      bool skip_smi_test = both_sides_are_smi;
      if (shift_value == 0 && op == Token::SHR) {
        problematic_mask |= 0x80000000;
        skip_smi_test = false;
      }
      if (!skip_smi_test) {
        __ tst(tos, Operand(problematic_mask));
        deferred->Branch(ne);  // Go slow for problematic input.
      }
      switch (op) {
        case Token::SHL: {
          if (shift_value != 0) {
            int adjusted_shift = shift_value - kSmiTagSize;
            ASSERT(adjusted_shift >= 0);
            if (adjusted_shift != 0) {
              __ mov(scratch, Operand(tos, LSL, adjusted_shift));
              // Check that the *signed* result fits in a smi.
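              // (A value v fits in a smi iff v is in [-2^30, 2^30); adding
              // 0x40000000 maps exactly that interval onto the non-negative
              // 32-bit words, so the sum's sign bit is set iff v is out of
              // range.)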
              __ add(scratch2, scratch, Operand(0x40000000), SetCC);
              deferred->Branch(mi);
              __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
            } else {
              // Check that the *signed* result fits in a smi.
              __ add(scratch2, tos, Operand(0x40000000), SetCC);
              deferred->Branch(mi);
              __ mov(tos, Operand(tos, LSL, kSmiTagSize));
            }
          }
          break;
        }
        case Token::SHR: {
          if (shift_value != 0) {
            __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Remove tag.
            // LSR by immediate 0 means shifting 32 bits.
            __ mov(scratch, Operand(scratch, LSR, shift_value));
            if (shift_value == 1) {
              // Check that the *unsigned* result fits in a smi.  Neither
              // of the two high-order bits can be set:
              //  - 0x80000000: the high bit would be lost when smi tagging;
              //  - 0x40000000: this number would convert to negative when
              //    smi tagging.
              // These two cases can only happen with shifts by 0 or 1 when
              // handed a valid smi.
              __ tst(scratch, Operand(0xc0000000));
              deferred->Branch(ne);
            } else {
              ASSERT(shift_value >= 2);
              result = TypeInfo::Smi();  // SHR by at least 2 gives a Smi.
            }
            __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
          }
          break;
        }
        case Token::SAR: {
          // In the ARM instruction set, ASR by immediate 0 means shifting 32
          // bits.
          if (shift_value != 0) {
            // Do the shift and the tag removal in one operation.  If the shift
            // is 31 bits (the highest possible value) then we emit the
            // instruction as a shift by 0 which means shift arithmetically by
            // 32.
            __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
            // Put tag back.
            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
            // SAR by at least 1 gives a Smi.
            result = TypeInfo::Smi();
          }
          break;
        }
        default: UNREACHABLE();
      }
      deferred->BindExit();
      frame_->EmitPush(tos, result);
      break;
    }

    case Token::MOD: {
      ASSERT(!reversed);
      ASSERT(int_value >= 2);
      ASSERT(IsPowerOf2(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned mask = (0x80000000u | kSmiTagMask);
      __ tst(tos, Operand(mask));
      deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
      mask = (int_value << kSmiTagSize) - 1;
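      // (For int_value == 8 and kSmiTagSize == 1 the mask is 15: ANDing a
      // tagged non-negative smi with it keeps the low three payload bits
      // plus the tag bit, which is exactly the tagged value of lhs % 8.)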
      __ and_(tos, tos, Operand(mask));
      deferred->BindExit();
      // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
      frame_->EmitPush(
          tos,
          both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
      break;
    }

    case Token::MUL: {
      ASSERT(IsEasyToMultiplyBy(int_value));
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
      max_smi_that_wont_overflow <<= kSmiTagSize;
      unsigned mask = 0x80000000u;
      while ((mask & max_smi_that_wont_overflow) == 0) {
        mask |= mask >> 1;
      }
      mask |= kSmiTagMask;
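      // (For int_value == 3 the largest safe tagged operand is 0x2aaaaaaa,
      // so the loop grows the mask to 0xe0000000 and ORing in kSmiTagMask
      // gives 0xe0000001: the test below then rejects non-smis, negative
      // smis, and any smi big enough that the product might leave the smi
      // range.)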
      // This does a single mask that checks for a too high value in a
      // conservative way and for a non-Smi.  It also filters out negative
      // numbers, unfortunately, but since this code is inline we prefer
      // brevity to comprehensiveness.
      __ tst(tos, Operand(mask));
      deferred->Branch(ne);
      MultiplyByKnownInt(masm_, tos, tos, int_value);
      deferred->BindExit();
      frame_->EmitPush(tos);
      break;
    }

    default:
      UNREACHABLE();
      break;
  }
}


void CodeGenerator::Comparison(Condition cc,
                               Expression* left,
                               Expression* right,
                               bool strict) {
  VirtualFrame::RegisterAllocationScope scope(this);

  if (left != NULL) Load(left);
  if (right != NULL) Load(right);

  // sp[0]: y
  // sp[1]: x
  // result: cc register

  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == eq);

  Register lhs;
  Register rhs;

  bool lhs_is_smi;
  bool rhs_is_smi;

  // We load the top two stack positions into registers chosen by the virtual
  // frame.  This should keep the register shuffling to a minimum.
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
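  // (For example, x > y is evaluated as y < x: the first pop below takes y,
  // the top of the stack, as the new lhs.)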
  if (cc == gt || cc == le) {
    cc = ReverseCondition(cc);
    lhs_is_smi = frame_->KnownSmiAt(0);
    rhs_is_smi = frame_->KnownSmiAt(1);
    lhs = frame_->PopToRegister();
    rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
  } else {
    rhs_is_smi = frame_->KnownSmiAt(0);
    lhs_is_smi = frame_->KnownSmiAt(1);
    rhs = frame_->PopToRegister();
    lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
  }

  bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);

  ASSERT(rhs.is(r0) || rhs.is(r1));
  ASSERT(lhs.is(r0) || lhs.is(r1));

  JumpTarget exit;

  if (!both_sides_are_smi) {
    // Now we have the two sides in r0 and r1.  We flush any other registers
    // because the stub doesn't know about register allocation.
    frame_->SpillAll();
    Register scratch = VirtualFrame::scratch0();
    Register smi_test_reg;
    if (lhs_is_smi) {
      smi_test_reg = rhs;
    } else if (rhs_is_smi) {
      smi_test_reg = lhs;
    } else {
      __ orr(scratch, lhs, Operand(rhs));
      smi_test_reg = scratch;
    }
    __ tst(smi_test_reg, Operand(kSmiTagMask));
    JumpTarget smi;
    smi.Branch(eq);

    // Perform non-smi comparison by stub.
    // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
    // We call with 0 args because there are 0 on the stack.
    if (!rhs.is(r0)) {
      __ Swap(rhs, lhs, ip);
    }

    CompareStub stub(cc, strict);
    frame_->CallStub(&stub, 0);
    __ cmp(r0, Operand(0));
    exit.Jump();

    smi.Bind();
  }

  // Do smi comparisons by pointer comparison.
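  // (Smi tagging is a left shift by one, which preserves signed order, so
  // comparing two tagged smis directly compares their payloads.)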
1440  __ cmp(lhs, Operand(rhs));
1441
1442  exit.Bind();
1443  cc_reg_ = cc;
1444}
1445
1446
1447// Call the function on the stack with the given arguments.
1448void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
1449                                      CallFunctionFlags flags,
1450                                      int position) {
1451  frame_->AssertIsSpilled();
1452
1453  // Push the arguments ("left-to-right") on the stack.
1454  int arg_count = args->length();
1455  for (int i = 0; i < arg_count; i++) {
1456    Load(args->at(i));
1457  }
1458
1459  // Record the position for debugging purposes.
1460  CodeForSourcePosition(position);
1461
1462  // Use the shared code stub to call the function.
1463  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
1464  CallFunctionStub call_function(arg_count, in_loop, flags);
1465  frame_->CallStub(&call_function, arg_count + 1);
1466
1467  // Restore context and pop function from the stack.
1468  __ ldr(cp, frame_->Context());
1469  frame_->Drop();  // discard the TOS
1470}
1471
1472
1473void CodeGenerator::CallApplyLazy(Expression* applicand,
1474                                  Expression* receiver,
1475                                  VariableProxy* arguments,
1476                                  int position) {
1477  // An optimized implementation of expressions of the form
1478  // x.apply(y, arguments).
1479  // If the arguments object of the scope has not been allocated,
1480  // and x.apply is Function.prototype.apply, this optimization
1481  // just copies y and the arguments of the current function on the
1482  // stack, as receiver and arguments, and calls x.
1483  // In the implementation comments, we call x the applicand
1484  // and y the receiver.
1485  VirtualFrame::SpilledScope spilled_scope(frame_);
1486
1487  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
1488  ASSERT(arguments->IsArguments());
1489
1490  // Load applicand.apply onto the stack. This will usually
1491  // give us a megamorphic load site. Not super, but it works.
1492  Load(applicand);
1493  Handle<String> name = Factory::LookupAsciiSymbol("apply");
1494  frame_->Dup();
1495  frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
1496  frame_->EmitPush(r0);
1497
1498  // Load the receiver and the existing arguments object onto the
1499  // expression stack. Avoid allocating the arguments object here.
1500  Load(receiver);
1501  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
1502
1503  // Emit the source position information after having loaded the
1504  // receiver and the arguments.
1505  CodeForSourcePosition(position);
1506  // Contents of the stack at this point:
1507  //   sp[0]: arguments object of the current function or the hole.
1508  //   sp[1]: receiver
1509  //   sp[2]: applicand.apply
1510  //   sp[3]: applicand.
1511
1512  // Check if the arguments object has been lazily allocated
1513  // already. If so, just use that instead of copying the arguments
1514  // from the stack. This also deals with cases where a local variable
1515  // named 'arguments' has been introduced.
1516  __ ldr(r0, MemOperand(sp, 0));
1517
1518  Label slow, done;
1519  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
1520  __ cmp(ip, r0);
1521  __ b(ne, &slow);
1522
1523  Label build_args;
1524  // Get rid of the arguments object probe.
1525  frame_->Drop();
1526  // Stack now has 3 elements on it.
1527  // Contents of stack at this point:
1528  //   sp[0]: receiver
1529  //   sp[1]: applicand.apply
1530  //   sp[2]: applicand.
1531
1532  // Check that the receiver really is a JavaScript object.
1533  __ ldr(r0, MemOperand(sp, 0));
1534  __ BranchOnSmi(r0, &build_args);
1535  // We allow all JSObjects including JSFunctions.  As long as
1536  // JS_FUNCTION_TYPE is the last instance type and it is right
1537  // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
1538  // bound.
1539  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
1540  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
1541  __ CompareObjectType(r0, r1, r2, FIRST_JS_OBJECT_TYPE);
1542  __ b(lt, &build_args);
1543
1544  // Check that applicand.apply is Function.prototype.apply.
1545  __ ldr(r0, MemOperand(sp, kPointerSize));
1546  __ BranchOnSmi(r0, &build_args);
1547  __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
1548  __ b(ne, &build_args);
1549  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
1550  Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
1551  __ ldr(r1, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
1552  __ cmp(r1, Operand(apply_code));
1553  __ b(ne, &build_args);
1554
1555  // Check that applicand is a function.
1556  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
1557  __ BranchOnSmi(r1, &build_args);
1558  __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
1559  __ b(ne, &build_args);
1560
1561  // Copy the arguments to this function possibly from the
1562  // adaptor frame below it.
1563  Label invoke, adapted;
1564  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1565  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1566  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1567  __ b(eq, &adapted);
1568
1569  // No arguments adaptor frame. Copy fixed number of arguments.
1570  __ mov(r0, Operand(scope()->num_parameters()));
1571  for (int i = 0; i < scope()->num_parameters(); i++) {
1572    __ ldr(r2, frame_->ParameterAt(i));
1573    __ push(r2);
1574  }
1575  __ jmp(&invoke);
1576
1577  // Arguments adaptor frame present. Copy arguments from there, but
1578  // avoid copying too many arguments to avoid stack overflows.
1579  __ bind(&adapted);
1580  static const uint32_t kArgumentsLimit = 1 * KB;
1581  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
1582  __ mov(r0, Operand(r0, LSR, kSmiTagSize));
1583  __ mov(r3, r0);
1584  __ cmp(r0, Operand(kArgumentsLimit));
1585  __ b(gt, &build_args);
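  // Smi arithmetic sketch for the untagging above (assuming the 32-bit
  // encoding with kSmiTagSize == 1): a smi stores its value shifted left
  // by one bit, so the integer 3 is encoded as binary 110 (== 6), and
  // the logical shift right recovers 3 from it.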
1586
1587  // Loop through the arguments pushing them onto the execution
1588  // stack. We don't inform the virtual frame of the push, so we don't
1589  // have to worry about getting rid of the elements from the virtual
1590  // frame.
1591  Label loop;
1592  // r3 is a small non-negative integer, due to the test above.
1593  __ cmp(r3, Operand(0));
1594  __ b(eq, &invoke);
1595  // Compute the address of the first argument.
1596  __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
1597  __ add(r2, r2, Operand(kPointerSize));
1598  __ bind(&loop);
1599  // Post-decrement argument address by kPointerSize on each iteration.
1600  __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
1601  __ push(r4);
1602  __ sub(r3, r3, Operand(1), SetCC);
1603  __ b(gt, &loop);
1604
1605  // Invoke the function.
1606  __ bind(&invoke);
1607  ParameterCount actual(r0);
1608  __ InvokeFunction(r1, actual, CALL_FUNCTION);
1609  // Drop applicand.apply and applicand from the stack, and push
1610  // the result of the function call, but leave the spilled frame
1611  // unchanged, with 3 elements, so it is correct when we compile the
1612  // slow-case code.
1613  __ add(sp, sp, Operand(2 * kPointerSize));
1614  __ push(r0);
1615  // Stack now has 1 element:
1616  //   sp[0]: result
1617  __ jmp(&done);
1618
1619  // Slow-case: Allocate the arguments object since we know it isn't
1620  // there, and fall-through to the slow-case where we call
1621  // applicand.apply.
1622  __ bind(&build_args);
1623  // Stack now has 3 elements, because we have jumped here from a point where:
1624  //   sp[0]: receiver
1625  //   sp[1]: applicand.apply
1626  //   sp[2]: applicand.
1627  StoreArgumentsObject(false);
1628
1629  // Stack and frame now have 4 elements.
1630  __ bind(&slow);
1631
1632  // Generic computation of x.apply(y, args) with no special optimization.
1633  // Flip applicand.apply and applicand on the stack, so
1634  // applicand looks like the receiver of the applicand.apply call.
1635  // Then process it as a normal function call.
1636  __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
1637  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
1638  __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
1639
1640  CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
1641  frame_->CallStub(&call_function, 3);
1642  // The function and its two arguments have been dropped.
1643  frame_->Drop();  // Drop the receiver as well.
1644  frame_->EmitPush(r0);
1645  // Stack now has 1 element:
1646  //   sp[0]: result
1647  __ bind(&done);
1648
1649  // Restore the context register after a call.
1650  __ ldr(cp, frame_->Context());
1651}
1652
1653
1654void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
1655  VirtualFrame::SpilledScope spilled_scope(frame_);
1656  ASSERT(has_cc());
1657  Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
1658  target->Branch(cc);
1659  cc_reg_ = al;
1660}
1661
1662
1663void CodeGenerator::CheckStack() {
1664  VirtualFrame::SpilledScope spilled_scope(frame_);
1665  Comment cmnt(masm_, "[ check stack");
1666  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
1667  // Set up lr before the comparison and the conditional jump.  kInstrSize
1668  // is added to the implicit 8 byte offset that always applies to reads of
1669  // pc and gives a return address 12 bytes down, just past the jump.
1670  masm_->add(lr, pc, Operand(Assembler::kInstrSize));
1671  masm_->cmp(sp, Operand(ip));
1672  StackCheckStub stub;
1673  // Call the stub if lower.
1674  masm_->mov(pc,
1675             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
1676                     RelocInfo::CODE_TARGET),
1677             LeaveCC,
1678             lo);
1679}
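// A worked example of the return-address arithmetic above, assuming
// Assembler::kInstrSize == 4: reading pc yields the address of the
// current instruction plus 8, so "add lr, pc, #4" stores addr(add) + 12
// into lr.  That is the address just past the conditional "mov pc, ...",
// which is exactly where the stack check stub should return.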
1680
1681
1682void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
1683#ifdef DEBUG
1684  int original_height = frame_->height();
1685#endif
1686  VirtualFrame::SpilledScope spilled_scope(frame_);
1687  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
1688    Visit(statements->at(i));
1689  }
1690  ASSERT(!has_valid_frame() || frame_->height() == original_height);
1691}
1692
1693
1694void CodeGenerator::VisitBlock(Block* node) {
1695#ifdef DEBUG
1696  int original_height = frame_->height();
1697#endif
1698  VirtualFrame::SpilledScope spilled_scope(frame_);
1699  Comment cmnt(masm_, "[ Block");
1700  CodeForStatementPosition(node);
1701  node->break_target()->SetExpectedHeight();
1702  VisitStatements(node->statements());
1703  if (node->break_target()->is_linked()) {
1704    node->break_target()->Bind();
1705  }
1706  node->break_target()->Unuse();
1707  ASSERT(!has_valid_frame() || frame_->height() == original_height);
1708}
1709
1710
1711void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
1712  frame_->EmitPush(cp);
1713  frame_->EmitPush(Operand(pairs));
1714  frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
1715
1716  VirtualFrame::SpilledScope spilled_scope(frame_);
1717  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
1718  // The result is discarded.
1719}
1720
1721
1722void CodeGenerator::VisitDeclaration(Declaration* node) {
1723#ifdef DEBUG
1724  int original_height = frame_->height();
1725#endif
1726  Comment cmnt(masm_, "[ Declaration");
1727  Variable* var = node->proxy()->var();
1728  ASSERT(var != NULL);  // must have been resolved
1729  Slot* slot = var->slot();
1730
1731  // If it was not possible to allocate the variable at compile time,
1732  // we need to "declare" it at runtime to make sure it actually
1733  // exists in the local context.
1734  if (slot != NULL && slot->type() == Slot::LOOKUP) {
1735    // Variables with a "LOOKUP" slot were introduced as non-locals
1736    // during variable resolution and must have mode DYNAMIC.
1737    ASSERT(var->is_dynamic());
1738    // For now, just do a runtime call.
1739    frame_->EmitPush(cp);
1740    frame_->EmitPush(Operand(var->name()));
1741    // Declaration nodes always use one of only two modes.
1742    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
1743    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
1744    frame_->EmitPush(Operand(Smi::FromInt(attr)));
1745    // Push initial value, if any.
1746    // Note: For variables we must not push an initial value (such as
1747    // 'undefined') because we may have a (legal) redeclaration and we
1748    // must not destroy the current value.
1749    if (node->mode() == Variable::CONST) {
1750      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
1751    } else if (node->fun() != NULL) {
1752      Load(node->fun());
1753    } else {
1754      frame_->EmitPush(Operand(0));
1755    }
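    // Illustrative JS for the note above: given the legal redeclaration
    //
    //   var x = 1;
    //   var x;        // must not reset x to undefined
    //
    // pushing 'undefined' as an initial value for the second declaration
    // would clobber the current value, hence the 0 pushed here as a
    // "no initial value" marker.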
1756
1757    VirtualFrame::SpilledScope spilled_scope(frame_);
1758    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
1759    // Ignore the return value (declarations are statements).
1760
1761    ASSERT(frame_->height() == original_height);
1762    return;
1763  }
1764
1765  ASSERT(!var->is_global());
1766
1767  // If we have a function or a constant, we need to initialize the variable.
1768  Expression* val = NULL;
1769  if (node->mode() == Variable::CONST) {
1770    val = new Literal(Factory::the_hole_value());
1771  } else {
1772    val = node->fun();  // NULL if we don't have a function
1773  }
1774
1775  if (val != NULL) {
1776    // Set initial value.
1777    Reference target(this, node->proxy());
1778    Load(val);
1779    target.SetValue(NOT_CONST_INIT);
1780
1781    // Get rid of the assigned value (declarations are statements).
1782    frame_->Drop();
1783  }
1784  ASSERT(frame_->height() == original_height);
1785}
1786
1787
1788void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
1789#ifdef DEBUG
1790  int original_height = frame_->height();
1791#endif
1792  Comment cmnt(masm_, "[ ExpressionStatement");
1793  CodeForStatementPosition(node);
1794  Expression* expression = node->expression();
1795  expression->MarkAsStatement();
1796  Load(expression);
1797  frame_->Drop();
1798  ASSERT(frame_->height() == original_height);
1799}
1800
1801
1802void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
1803#ifdef DEBUG
1804  int original_height = frame_->height();
1805#endif
1806  Comment cmnt(masm_, "// EmptyStatement");
1807  CodeForStatementPosition(node);
1808  // nothing to do
1809  ASSERT(frame_->height() == original_height);
1810}
1811
1812
1813void CodeGenerator::VisitIfStatement(IfStatement* node) {
1814#ifdef DEBUG
1815  int original_height = frame_->height();
1816#endif
1817  Comment cmnt(masm_, "[ IfStatement");
1818  // Generate different code depending on which parts of the if statement
1819  // are present or not.
1820  bool has_then_stm = node->HasThenStatement();
1821  bool has_else_stm = node->HasElseStatement();
1822
1823  CodeForStatementPosition(node);
1824
1825  JumpTarget exit;
1826  if (has_then_stm && has_else_stm) {
1827    Comment cmnt(masm_, "[ IfThenElse");
1828    JumpTarget then;
1829    JumpTarget else_;
1830    // if (cond)
1831    LoadCondition(node->condition(), &then, &else_, true);
1832    if (frame_ != NULL) {
1833      Branch(false, &else_);
1834    }
1835    // then
1836    if (frame_ != NULL || then.is_linked()) {
1837      then.Bind();
1838      Visit(node->then_statement());
1839    }
1840    if (frame_ != NULL) {
1841      exit.Jump();
1842    }
1843    // else
1844    if (else_.is_linked()) {
1845      else_.Bind();
1846      Visit(node->else_statement());
1847    }
1848
1849  } else if (has_then_stm) {
1850    Comment cmnt(masm_, "[ IfThen");
1851    ASSERT(!has_else_stm);
1852    JumpTarget then;
1853    // if (cond)
1854    LoadCondition(node->condition(), &then, &exit, true);
1855    if (frame_ != NULL) {
1856      Branch(false, &exit);
1857    }
1858    // then
1859    if (frame_ != NULL || then.is_linked()) {
1860      then.Bind();
1861      Visit(node->then_statement());
1862    }
1863
1864  } else if (has_else_stm) {
1865    Comment cmnt(masm_, "[ IfElse");
1866    ASSERT(!has_then_stm);
1867    JumpTarget else_;
1868    // if (!cond)
1869    LoadCondition(node->condition(), &exit, &else_, true);
1870    if (frame_ != NULL) {
1871      Branch(true, &exit);
1872    }
1873    // else
1874    if (frame_ != NULL || else_.is_linked()) {
1875      else_.Bind();
1876      Visit(node->else_statement());
1877    }
1878
1879  } else {
1880    Comment cmnt(masm_, "[ If");
1881    ASSERT(!has_then_stm && !has_else_stm);
1882    // if (cond)
1883    LoadCondition(node->condition(), &exit, &exit, false);
1884    if (frame_ != NULL) {
1885      if (has_cc()) {
1886        cc_reg_ = al;
1887      } else {
1888        frame_->Drop();
1889      }
1890    }
1891  }
1892
1893  // end
1894  if (exit.is_linked()) {
1895    exit.Bind();
1896  }
1897  ASSERT(!has_valid_frame() || frame_->height() == original_height);
1898}
1899
1900
1901void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
1902  VirtualFrame::SpilledScope spilled_scope(frame_);
1903  Comment cmnt(masm_, "[ ContinueStatement");
1904  CodeForStatementPosition(node);
1905  node->target()->continue_target()->Jump();
1906}
1907
1908
1909void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
1910  VirtualFrame::SpilledScope spilled_scope(frame_);
1911  Comment cmnt(masm_, "[ BreakStatement");
1912  CodeForStatementPosition(node);
1913  node->target()->break_target()->Jump();
1914}
1915
1916
1917void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
1918  VirtualFrame::SpilledScope spilled_scope(frame_);
1919  Comment cmnt(masm_, "[ ReturnStatement");
1920
1921  CodeForStatementPosition(node);
1922  Load(node->expression());
1923  if (function_return_is_shadowed_) {
1924    frame_->EmitPop(r0);
1925    function_return_.Jump();
1926  } else {
1927    // Pop the result from the frame and prepare the frame for
1928    // returning thus making it easier to merge.
1929    frame_->EmitPop(r0);
1930    frame_->PrepareForReturn();
1931    if (function_return_.is_bound()) {
1932      // If the function return label is already bound we reuse the
1933      // code by jumping to the return site.
1934      function_return_.Jump();
1935    } else {
1936      function_return_.Bind();
1937      GenerateReturnSequence();
1938    }
1939  }
1940}
1941
1942
1943void CodeGenerator::GenerateReturnSequence() {
1944  if (FLAG_trace) {
1945    // Push the return value on the stack as the parameter.
1946    // Runtime::TraceExit returns the parameter as it is.
1947    frame_->EmitPush(r0);
1948    frame_->CallRuntime(Runtime::kTraceExit, 1);
1949  }
1950
1951#ifdef DEBUG
1952  // Add a label for checking the size of the code used for returning.
1953  Label check_exit_codesize;
1954  masm_->bind(&check_exit_codesize);
1955#endif
1956  // Make sure that the constant pool is not emitted inside of the return
1957  // sequence.
1958  { Assembler::BlockConstPoolScope block_const_pool(masm_);
1959    // Tear down the frame which will restore the caller's frame pointer and
1960    // the link register.
1961    frame_->Exit();
1962
1963    // Here we use masm_-> instead of the __ macro to prevent the code coverage
1964    // tool from instrumenting this sequence, as we rely on its exact size here.
1965    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
1966    masm_->add(sp, sp, Operand(sp_delta));
1967    masm_->Jump(lr);
1968    DeleteFrame();
1969
1970#ifdef DEBUG
1971    // Check that the size of the code used for returning matches what is
1972    // expected by the debugger. If the sp_delta above cannot be encoded in
1973    // the add instruction, the add will generate two instructions.
1974    int return_sequence_length =
1975        masm_->InstructionsGeneratedSince(&check_exit_codesize);
1976    CHECK(return_sequence_length ==
1977          Assembler::kJSReturnSequenceInstructions ||
1978          return_sequence_length ==
1979          Assembler::kJSReturnSequenceInstructions + 1);
1980#endif
1981  }
1982}
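// A rough sketch of the fixed-size return sequence checked above (the
// shape is illustrative; the exact instructions are whatever
// frame_->Exit() and masm_->Jump(lr) emit):
//
//   <restore caller fp and lr>      ; frame_->Exit()
//   add sp, sp, #sp_delta           ; pop the receiver and parameters
//   mov pc, lr                      ; return
//
// If sp_delta does not fit in an immediate operand the add expands to
// two instructions, hence the +1 tolerance in the CHECK above.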
1983
1984
1985void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
1986#ifdef DEBUG
1987  int original_height = frame_->height();
1988#endif
1989  VirtualFrame::SpilledScope spilled_scope(frame_);
1990  Comment cmnt(masm_, "[ WithEnterStatement");
1991  CodeForStatementPosition(node);
1992  Load(node->expression());
1993  if (node->is_catch_block()) {
1994    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
1995  } else {
1996    frame_->CallRuntime(Runtime::kPushContext, 1);
1997  }
1998#ifdef DEBUG
1999  JumpTarget verified_true;
2000  __ cmp(r0, cp);
2001  verified_true.Branch(eq);
2002  __ stop("PushContext: r0 is expected to be the same as cp");
2003  verified_true.Bind();
2004#endif
2005  // Update context local.
2006  __ str(cp, frame_->Context());
2007  ASSERT(frame_->height() == original_height);
2008}
2009
2010
2011void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
2012#ifdef DEBUG
2013  int original_height = frame_->height();
2014#endif
2015  VirtualFrame::SpilledScope spilled_scope(frame_);
2016  Comment cmnt(masm_, "[ WithExitStatement");
2017  CodeForStatementPosition(node);
2018  // Pop context.
2019  __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
2020  // Update context local.
2021  __ str(cp, frame_->Context());
2022  ASSERT(frame_->height() == original_height);
2023}
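// Illustrative JS for the context push/pop pair above: entering
//
//   with (obj) { f(); }
//
// calls Runtime::kPushContext (or kPushCatchContext for a catch block)
// to install a new context, and leaving the body restores the previous
// context from the PREVIOUS_INDEX slot as done in VisitWithExitStatement.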
2024
2025
2026void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
2027#ifdef DEBUG
2028  int original_height = frame_->height();
2029#endif
2030  VirtualFrame::SpilledScope spilled_scope(frame_);
2031  Comment cmnt(masm_, "[ SwitchStatement");
2032  CodeForStatementPosition(node);
2033  node->break_target()->SetExpectedHeight();
2034
2035  Load(node->tag());
2036
2037  JumpTarget next_test;
2038  JumpTarget fall_through;
2039  JumpTarget default_entry;
2040  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
2041  ZoneList<CaseClause*>* cases = node->cases();
2042  int length = cases->length();
2043  CaseClause* default_clause = NULL;
2044
2045  for (int i = 0; i < length; i++) {
2046    CaseClause* clause = cases->at(i);
2047    if (clause->is_default()) {
2048      // Remember the default clause and compile it at the end.
2049      default_clause = clause;
2050      continue;
2051    }
2052
2053    Comment cmnt(masm_, "[ Case clause");
2054    // Compile the test.
2055    next_test.Bind();
2056    next_test.Unuse();
2057    // Duplicate TOS.
2058    __ ldr(r0, frame_->Top());
2059    frame_->EmitPush(r0);
2060    Comparison(eq, NULL, clause->label(), true);
2061    Branch(false, &next_test);
2062
2063    // Before entering the body from the test, remove the switch value from
2064    // the stack.
2065    frame_->Drop();
2066
2067    // Label the body so that fall through is enabled.
2068    if (i > 0 && cases->at(i - 1)->is_default()) {
2069      default_exit.Bind();
2070    } else {
2071      fall_through.Bind();
2072      fall_through.Unuse();
2073    }
2074    VisitStatements(clause->statements());
2075
2076    // If control flow can fall through from the body, jump to the next body
2077    // or the end of the statement.
2078    if (frame_ != NULL) {
2079      if (i < length - 1 && cases->at(i + 1)->is_default()) {
2080        default_entry.Jump();
2081      } else {
2082        fall_through.Jump();
2083      }
2084    }
2085  }
2086
2087  // The final "test" removes the switch value.
2088  next_test.Bind();
2089  frame_->Drop();
2090
2091  // If there is a default clause, compile it.
2092  if (default_clause != NULL) {
2093    Comment cmnt(masm_, "[ Default clause");
2094    default_entry.Bind();
2095    VisitStatements(default_clause->statements());
2096    // If control flow can fall out of the default and there is a case after
2097    // it, jump to that case's body.
2098    if (frame_ != NULL && default_exit.is_bound()) {
2099      default_exit.Jump();
2100    }
2101  }
2102
2103  if (fall_through.is_linked()) {
2104    fall_through.Bind();
2105  }
2106
2107  if (node->break_target()->is_linked()) {
2108    node->break_target()->Bind();
2109  }
2110  node->break_target()->Unuse();
2111  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2112}
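// Illustrative JS showing why default_entry and default_exit are needed:
//
//   switch (x) {
//     case 0:  a();   // falls through into the default clause
//     default: b();   // compiled last, entered via default_entry
//     case 1:  c();   // reached from the default body via default_exit
//   }
//
// Because the default clause is compiled after all the case tests,
// falling into it and out of it both require the extra jump targets
// threaded through the loop above.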
2113
2114
2115void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
2116#ifdef DEBUG
2117  int original_height = frame_->height();
2118#endif
2119  VirtualFrame::SpilledScope spilled_scope(frame_);
2120  Comment cmnt(masm_, "[ DoWhileStatement");
2121  CodeForStatementPosition(node);
2122  node->break_target()->SetExpectedHeight();
2123  JumpTarget body(JumpTarget::BIDIRECTIONAL);
2124  IncrementLoopNesting();
2125
2126  // Label the top of the loop for the backward CFG edge.  If the test
2127  // is always true we can use the continue target, and if the test is
2128  // always false there is no need.
2129  ConditionAnalysis info = AnalyzeCondition(node->cond());
2130  switch (info) {
2131    case ALWAYS_TRUE:
2132      node->continue_target()->SetExpectedHeight();
2133      node->continue_target()->Bind();
2134      break;
2135    case ALWAYS_FALSE:
2136      node->continue_target()->SetExpectedHeight();
2137      break;
2138    case DONT_KNOW:
2139      node->continue_target()->SetExpectedHeight();
2140      body.Bind();
2141      break;
2142  }
2143
2144  CheckStack();  // TODO(1222600): ignore if body contains calls.
2145  Visit(node->body());
2146
2147  // Compile the test.
2148  switch (info) {
2149    case ALWAYS_TRUE:
2150      // If control can fall off the end of the body, jump back to the
2151      // top.
2152      if (has_valid_frame()) {
2153        node->continue_target()->Jump();
2154      }
2155      break;
2156    case ALWAYS_FALSE:
2157      // If we have a continue in the body, we only have to bind its
2158      // jump target.
2159      if (node->continue_target()->is_linked()) {
2160        node->continue_target()->Bind();
2161      }
2162      break;
2163    case DONT_KNOW:
2164      // We have to compile the test expression if it can be reached by
2165      // control flow falling out of the body or via continue.
2166      if (node->continue_target()->is_linked()) {
2167        node->continue_target()->Bind();
2168      }
2169      if (has_valid_frame()) {
2170        Comment cmnt(masm_, "[ DoWhileCondition");
2171        CodeForDoWhileConditionPosition(node);
2172        LoadCondition(node->cond(), &body, node->break_target(), true);
2173        if (has_valid_frame()) {
2174          // An invalid frame here indicates that control did not
2175          // fall out of the test expression.
2176          Branch(true, &body);
2177        }
2178      }
2179      break;
2180  }
2181
2182  if (node->break_target()->is_linked()) {
2183    node->break_target()->Bind();
2184  }
2185  DecrementLoopNesting();
2186  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2187}
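// A sketch of the three ConditionAnalysis outcomes handled above:
//
//   do { f(); } while (true);    // ALWAYS_TRUE: unconditional back edge
//   do { f(); } while (false);   // ALWAYS_FALSE: no back edge emitted
//   do { f(); } while (x < n);   // DONT_KNOW: the test is compiled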
2188
2189
2190void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
2191#ifdef DEBUG
2192  int original_height = frame_->height();
2193#endif
2194  VirtualFrame::SpilledScope spilled_scope(frame_);
2195  Comment cmnt(masm_, "[ WhileStatement");
2196  CodeForStatementPosition(node);
2197
2198  // If the test is never true and has no side effects there is no need
2199  // to compile the test or body.
2200  ConditionAnalysis info = AnalyzeCondition(node->cond());
2201  if (info == ALWAYS_FALSE) return;
2202
2203  node->break_target()->SetExpectedHeight();
2204  IncrementLoopNesting();
2205
2206  // Label the top of the loop with the continue target for the backward
2207  // CFG edge.
2208  node->continue_target()->SetExpectedHeight();
2209  node->continue_target()->Bind();
2210
2211  if (info == DONT_KNOW) {
2212    JumpTarget body;
2213    LoadCondition(node->cond(), &body, node->break_target(), true);
2214    if (has_valid_frame()) {
2215      // A NULL frame indicates that control did not fall out of the
2216      // test expression.
2217      Branch(false, node->break_target());
2218    }
2219    if (has_valid_frame() || body.is_linked()) {
2220      body.Bind();
2221    }
2222  }
2223
2224  if (has_valid_frame()) {
2225    CheckStack();  // TODO(1222600): ignore if body contains calls.
2226    Visit(node->body());
2227
2228    // If control flow can fall out of the body, jump back to the top.
2229    if (has_valid_frame()) {
2230      node->continue_target()->Jump();
2231    }
2232  }
2233  if (node->break_target()->is_linked()) {
2234    node->break_target()->Bind();
2235  }
2236  DecrementLoopNesting();
2237  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2238}
2239
2240
2241void CodeGenerator::VisitForStatement(ForStatement* node) {
2242#ifdef DEBUG
2243  int original_height = frame_->height();
2244#endif
2245  VirtualFrame::SpilledScope spilled_scope(frame_);
2246  Comment cmnt(masm_, "[ ForStatement");
2247  CodeForStatementPosition(node);
2248  if (node->init() != NULL) {
2249    Visit(node->init());
2250  }
2251
2252  // If the test is never true there is no need to compile the test or
2253  // body.
2254  ConditionAnalysis info = AnalyzeCondition(node->cond());
2255  if (info == ALWAYS_FALSE) return;
2256
2257  node->break_target()->SetExpectedHeight();
2258  IncrementLoopNesting();
2259
2260  // We know that the loop index is a smi if it is not modified in the
2261  // loop body and it is checked against a constant limit in the loop
2262  // condition.  In this case, we reset the static type information of the
2263  // loop index to smi before compiling the body, the update expression, and
2264  // the bottom check of the loop condition.
2265  TypeInfoCodeGenState type_info_scope(this,
2266                                       node->is_fast_smi_loop() ?
2267                                           node->loop_variable()->slot() :
2268                                           NULL,
2269                                       TypeInfo::Smi());
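  // Illustrative JS of a fast smi loop recognized here:
  //
  //   for (var i = 0; i < 100; i++) sum += a[i];
  //
  // 'i' is never written in the loop body and is compared against a
  // constant limit, so its type can be assumed Smi while compiling the
  // body, the update expression, and the bottom check.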
2270
2271  // If there is no update statement, label the top of the loop with the
2272  // continue target, otherwise with the loop target.
2273  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2274  if (node->next() == NULL) {
2275    node->continue_target()->SetExpectedHeight();
2276    node->continue_target()->Bind();
2277  } else {
2278    node->continue_target()->SetExpectedHeight();
2279    loop.Bind();
2280  }
2281
2282  // If the test is always true, there is no need to compile it.
2283  if (info == DONT_KNOW) {
2284    JumpTarget body;
2285    LoadCondition(node->cond(), &body, node->break_target(), true);
2286    if (has_valid_frame()) {
2287      Branch(false, node->break_target());
2288    }
2289    if (has_valid_frame() || body.is_linked()) {
2290      body.Bind();
2291    }
2292  }
2293
2294  if (has_valid_frame()) {
2295    CheckStack();  // TODO(1222600): ignore if body contains calls.
2296    Visit(node->body());
2297
2298    if (node->next() == NULL) {
2299      // If there is no update statement and control flow can fall out
2300      // of the loop, jump directly to the continue label.
2301      if (has_valid_frame()) {
2302        node->continue_target()->Jump();
2303      }
2304    } else {
2305      // If there is an update statement and control flow can reach it
2306      // via falling out of the body of the loop or continuing, we
2307      // compile the update statement.
2308      if (node->continue_target()->is_linked()) {
2309        node->continue_target()->Bind();
2310      }
2311      if (has_valid_frame()) {
2312        // Record the source position of the statement: although this code
2313        // comes after the code for the body, it actually belongs to the loop
2314        // statement and not to the body.
2315        CodeForStatementPosition(node);
2316        Visit(node->next());
2317        loop.Jump();
2318      }
2319    }
2320  }
2321  if (node->break_target()->is_linked()) {
2322    node->break_target()->Bind();
2323  }
2324  DecrementLoopNesting();
2325  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2326}
2327
2328
2329void CodeGenerator::VisitForInStatement(ForInStatement* node) {
2330#ifdef DEBUG
2331  int original_height = frame_->height();
2332#endif
2333  VirtualFrame::SpilledScope spilled_scope(frame_);
2334  Comment cmnt(masm_, "[ ForInStatement");
2335  CodeForStatementPosition(node);
2336
2337  JumpTarget primitive;
2338  JumpTarget jsobject;
2339  JumpTarget fixed_array;
2340  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
2341  JumpTarget end_del_check;
2342  JumpTarget exit;
2343
2344  // Get the object to enumerate over (converted to JSObject).
2345  Load(node->enumerable());
2346
2347  // Both SpiderMonkey and kjs ignore null and undefined in contrast
2348  // to the specification.  12.6.4 mandates a call to ToObject.
2349  frame_->EmitPop(r0);
2350  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2351  __ cmp(r0, ip);
2352  exit.Branch(eq);
2353  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2354  __ cmp(r0, ip);
2355  exit.Branch(eq);
2356
2357  // Stack layout in body:
2358  // [iteration counter (Smi)]
2359  // [length of array]
2360  // [FixedArray]
2361  // [Map or 0]
2362  // [Object]
2363
2364  // Check if enumerable is already a JSObject
2365  __ tst(r0, Operand(kSmiTagMask));
2366  primitive.Branch(eq);
2367  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
2368  jsobject.Branch(hs);
2369
2370  primitive.Bind();
2371  frame_->EmitPush(r0);
2372  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
2373
2374  jsobject.Bind();
2375  // Get the set of properties (as a FixedArray or Map).
2376  // r0: value to be iterated over
2377  frame_->EmitPush(r0);  // Push the object being iterated over.
2378
2379  // Check cache validity in generated code. This is a fast case for
2380  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
2381  // guarantee cache validity, call the runtime system to check cache
2382  // validity or get the property names in a fixed array.
2383  JumpTarget call_runtime;
2384  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
2385  JumpTarget check_prototype;
2386  JumpTarget use_cache;
2387  __ mov(r1, Operand(r0));
2388  loop.Bind();
2389  // Check that there are no elements.
2390  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
2391  __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
2392  __ cmp(r2, r4);
2393  call_runtime.Branch(ne);
2394  // Check that instance descriptors are not empty so that we can
2395  // check for an enum cache.  Leave the map in r3 for the subsequent
2396  // prototype load.
2397  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
2398  __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
2399  __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
2400  __ cmp(r2, ip);
2401  call_runtime.Branch(eq);
2402  // Check that there is an enum cache in the non-empty instance
2403  // descriptors.  This is the case if the next enumeration index
2404  // field does not contain a smi.
2405  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
2406  __ tst(r2, Operand(kSmiTagMask));
2407  call_runtime.Branch(eq);
2408  // For all objects but the receiver, check that the cache is empty.
2409  // r4: empty fixed array root.
2410  __ cmp(r1, r0);
2411  check_prototype.Branch(eq);
2412  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
2413  __ cmp(r2, r4);
2414  call_runtime.Branch(ne);
2415  check_prototype.Bind();
2416  // Load the prototype from the map and loop if non-null.
2417  __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
2418  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2419  __ cmp(r1, ip);
2420  loop.Branch(ne);
2421  // The enum cache is valid.  Load the map of the object being
2422  // iterated over and use the cache for the iteration.
2423  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
2424  use_cache.Jump();
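  // A sketch of the fast case established above: for
  //
  //   for (var key in obj) { ... }
  //
  // the loop walks obj and its prototype chain, verifying that every
  // object has no elements, that every map's instance descriptors carry
  // an enum cache, and that no prototype has cached enum keys of its
  // own.  Only then can obj's map supply the property names directly.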
2425
2426  call_runtime.Bind();
2427  // Call the runtime to get the property names for the object.
2428  frame_->EmitPush(r0);  // push the object (slot 4) for the runtime call
2429  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
2430
2431  // If we got a map from the runtime call, we can do a fast
2432  // modification check. Otherwise, we got a fixed array, and we have
2433  // to do a slow check.
2434  // r0: map or fixed array (result from call to
2435  // Runtime::kGetPropertyNamesFast)
2436  __ mov(r2, Operand(r0));
2437  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
2438  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
2439  __ cmp(r1, ip);
2440  fixed_array.Branch(ne);
2441
2442  use_cache.Bind();
2443  // Get enum cache
2444  // r0: map (either the result from a call to
2445  // Runtime::kGetPropertyNamesFast or has been fetched directly from
2446  // the object)
2447  __ mov(r1, Operand(r0));
2448  __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
2449  __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
2450  __ ldr(r2,
2451         FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
2452
2453  frame_->EmitPush(r0);  // map
2454  frame_->EmitPush(r2);  // enum cache bridge cache
2455  __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
2456  frame_->EmitPush(r0);
2457  __ mov(r0, Operand(Smi::FromInt(0)));
2458  frame_->EmitPush(r0);
2459  entry.Jump();
2460
2461  fixed_array.Bind();
2462  __ mov(r1, Operand(Smi::FromInt(0)));
2463  frame_->EmitPush(r1);  // insert 0 in place of Map
2464  frame_->EmitPush(r0);
2465
2466  // Push the length of the array and the initial index onto the stack.
2467  __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
2468  frame_->EmitPush(r0);
2469  __ mov(r0, Operand(Smi::FromInt(0)));  // init index
2470  frame_->EmitPush(r0);
2471
2472  // Condition.
2473  entry.Bind();
2474  // sp[0] : index
2475  // sp[1] : array/enum cache length
2476  // sp[2] : array or enum cache
2477  // sp[3] : 0 or map
2478  // sp[4] : enumerable
2479  // Grab the current frame's height for the break and continue
2480  // targets only after all the state is pushed on the frame.
2481  node->break_target()->SetExpectedHeight();
2482  node->continue_target()->SetExpectedHeight();
2483
2484  // Load the current count to r0, load the length to r1.
2485  __ Ldrd(r0, r1, frame_->ElementAt(0));
2486  __ cmp(r0, r1);  // compare to the array length
2487  node->break_target()->Branch(hs);
2488
2489  // Get the i'th entry of the array.
2490  __ ldr(r2, frame_->ElementAt(2));
2491  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2492  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
2493
2494  // Get Map or 0.
2495  __ ldr(r2, frame_->ElementAt(3));
2496  // Check if this (still) matches the map of the enumerable.
2497  // If not, we have to filter the key.
2498  __ ldr(r1, frame_->ElementAt(4));
2499  __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
2500  __ cmp(r1, Operand(r2));
2501  end_del_check.Branch(eq);
2502
2503  // Convert the entry to a string (or null if it isn't a property anymore).
2504  __ ldr(r0, frame_->ElementAt(4));  // push enumerable
2505  frame_->EmitPush(r0);
2506  frame_->EmitPush(r3);  // push entry
2507  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
2508  __ mov(r3, Operand(r0));
2509
2510  // If the property has been removed while iterating, we just skip it.
2511  __ LoadRoot(ip, Heap::kNullValueRootIndex);
2512  __ cmp(r3, ip);
2513  node->continue_target()->Branch(eq);
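  // Illustrative JS for the filter step above: if the body deletes a
  // key that is still pending in the enum cache, e.g.
  //
  //   for (var k in o) { delete o.b; }   // 'b' may still be pending
  //
  // FILTER_KEY maps the stale entry to null and the branch above skips it.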
2514
2515  end_del_check.Bind();
2516  // Store the entry in the 'each' expression and take another spin in the
2517  // loop.  r3: i'th entry of the enum cache (or string thereof)
2518  frame_->EmitPush(r3);  // push entry
2519  { Reference each(this, node->each());
2520    if (!each.is_illegal()) {
2521      if (each.size() > 0) {
2522        __ ldr(r0, frame_->ElementAt(each.size()));
2523        frame_->EmitPush(r0);
2524        each.SetValue(NOT_CONST_INIT);
2525        frame_->Drop(2);
2526      } else {
2527        // If the reference was to a slot we rely on the convenient property
2528        // that it doesn't matter whether a value (eg, r3 pushed above) is
2529        // right on top of or right underneath a zero-sized reference.
2530        each.SetValue(NOT_CONST_INIT);
2531        frame_->Drop();
2532      }
2533    }
2534  }
2535  // Body.
2536  CheckStack();  // TODO(1222600): ignore if body contains calls.
2537  Visit(node->body());
2538
2539  // Next.  Reestablish a spilled frame in case we are coming here via
2540  // a continue in the body.
2541  node->continue_target()->Bind();
2542  frame_->SpillAll();
2543  frame_->EmitPop(r0);
2544  __ add(r0, r0, Operand(Smi::FromInt(1)));
2545  frame_->EmitPush(r0);
2546  entry.Jump();
2547
2548  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
2549  // any frame.
2550  node->break_target()->Bind();
2551  frame_->Drop(5);
2552
2553  // Exit.
2554  exit.Bind();
2555  node->continue_target()->Unuse();
2556  node->break_target()->Unuse();
2557  ASSERT(frame_->height() == original_height);
2558}
2559
2560
2561void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
2562#ifdef DEBUG
2563  int original_height = frame_->height();
2564#endif
2565  VirtualFrame::SpilledScope spilled_scope(frame_);
2566  Comment cmnt(masm_, "[ TryCatchStatement");
2567  CodeForStatementPosition(node);
2568
2569  JumpTarget try_block;
2570  JumpTarget exit;
2571
2572  try_block.Call();
2573  // --- Catch block ---
2574  frame_->EmitPush(r0);
2575
2576  // Store the caught exception in the catch variable.
2577  Variable* catch_var = node->catch_var()->var();
2578  ASSERT(catch_var != NULL && catch_var->slot() != NULL);
2579  StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
2580
2581  // Remove the exception from the stack.
2582  frame_->Drop();
2583
2584  VisitStatements(node->catch_block()->statements());
2585  if (frame_ != NULL) {
2586    exit.Jump();
2587  }
2588
2589
2590  // --- Try block ---
2591  try_block.Bind();
2592
2593  frame_->PushTryHandler(TRY_CATCH_HANDLER);
2594  int handler_height = frame_->height();
2595
2596  // Shadow the labels for all escapes from the try block, including
2597  // returns. During shadowing, the original label is hidden as the
2598  // LabelShadow and operations on the original actually affect the
2599  // shadowing label.
2600  //
2601  // We should probably try to unify the escaping labels and the return
2602  // label.
2603  int nof_escapes = node->escaping_targets()->length();
2604  List<ShadowTarget*> shadows(1 + nof_escapes);
2605
2606  // Add the shadow target for the function return.
2607  static const int kReturnShadowIndex = 0;
2608  shadows.Add(new ShadowTarget(&function_return_));
2609  bool function_return_was_shadowed = function_return_is_shadowed_;
2610  function_return_is_shadowed_ = true;
2611  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2612
2613  // Add the remaining shadow targets.
2614  for (int i = 0; i < nof_escapes; i++) {
2615    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2616  }
2617
2618  // Generate code for the statements in the try block.
2619  VisitStatements(node->try_block()->statements());
2620
2621  // Stop the introduced shadowing and count the number of required unlinks.
2622  // After shadowing stops, the original labels are unshadowed and the
2623  // LabelShadows represent the formerly shadowing labels.
2624  bool has_unlinks = false;
2625  for (int i = 0; i < shadows.length(); i++) {
2626    shadows[i]->StopShadowing();
2627    has_unlinks = has_unlinks || shadows[i]->is_linked();
2628  }
2629  function_return_is_shadowed_ = function_return_was_shadowed;
2630
2631  // Get an external reference to the handler address.
2632  ExternalReference handler_address(Top::k_handler_address);
2633
2634  // If we can fall off the end of the try block, unlink from try chain.
2635  if (has_valid_frame()) {
2636    // The next handler address is on top of the frame.  Unlink from
2637    // the handler list and drop the rest of this handler from the
2638    // frame.
2639    ASSERT(StackHandlerConstants::kNextOffset == 0);
2640    frame_->EmitPop(r1);
2641    __ mov(r3, Operand(handler_address));
2642    __ str(r1, MemOperand(r3));
2643    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2644    if (has_unlinks) {
2645      exit.Jump();
2646    }
2647  }
2648
2649  // Generate unlink code for the (formerly) shadowing labels that have been
2650  // jumped to.  Deallocate each shadow target.
2651  for (int i = 0; i < shadows.length(); i++) {
2652    if (shadows[i]->is_linked()) {
2653      // Unlink from the try chain.
2654      shadows[i]->Bind();
2655      // Because we can be jumping here (to spilled code) from unspilled
2656      // code, we need to reestablish a spilled frame at this block.
2657      frame_->SpillAll();
2658
2659      // Reload sp from the top handler, because some statements that we
2660      // break from (eg, for...in) may have left stuff on the stack.
2661      __ mov(r3, Operand(handler_address));
2662      __ ldr(sp, MemOperand(r3));
2663      frame_->Forget(frame_->height() - handler_height);
2664
2665      ASSERT(StackHandlerConstants::kNextOffset == 0);
2666      frame_->EmitPop(r1);
2667      __ str(r1, MemOperand(r3));
2668      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2669
2670      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2671        frame_->PrepareForReturn();
2672      }
2673      shadows[i]->other_target()->Jump();
2674    }
2675  }
2676
2677  exit.Bind();
2678  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2679}
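// Illustrative JS for the escaping targets shadowed above:
//
//   outer: for (;;) {
//     try { break outer; }   // must not leave the try handler linked
//     catch (e) { g(); }
//   }
//
// The break cannot jump straight out of the try block; its shadow
// target routes it through the unlink code first so the handler chain
// stays consistent.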
2680
2681
2682void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
2683#ifdef DEBUG
2684  int original_height = frame_->height();
2685#endif
2686  VirtualFrame::SpilledScope spilled_scope(frame_);
2687  Comment cmnt(masm_, "[ TryFinallyStatement");
2688  CodeForStatementPosition(node);
2689
2690  // State: Used to keep track of reason for entering the finally
2691  // block. Should probably be extended to hold information for
2692  // break/continue from within the try block.
2693  enum { FALLING, THROWING, JUMPING };
2694
2695  JumpTarget try_block;
2696  JumpTarget finally_block;
2697
2698  try_block.Call();
2699
2700  frame_->EmitPush(r0);  // save exception object on the stack
2701  // In case of thrown exceptions, this is where we continue.
2702  __ mov(r2, Operand(Smi::FromInt(THROWING)));
2703  finally_block.Jump();
2704
2705  // --- Try block ---
2706  try_block.Bind();
2707
2708  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
2709  int handler_height = frame_->height();
2710
2711  // Shadow the labels for all escapes from the try block, including
2712  // returns.  Shadowing hides the original label as the LabelShadow and
2713  // operations on the original actually affect the shadowing label.
2714  //
2715  // We should probably try to unify the escaping labels and the return
2716  // label.
2717  int nof_escapes = node->escaping_targets()->length();
2718  List<ShadowTarget*> shadows(1 + nof_escapes);
2719
2720  // Add the shadow target for the function return.
2721  static const int kReturnShadowIndex = 0;
2722  shadows.Add(new ShadowTarget(&function_return_));
2723  bool function_return_was_shadowed = function_return_is_shadowed_;
2724  function_return_is_shadowed_ = true;
2725  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
2726
2727  // Add the remaining shadow targets.
2728  for (int i = 0; i < nof_escapes; i++) {
2729    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
2730  }
2731
2732  // Generate code for the statements in the try block.
2733  VisitStatements(node->try_block()->statements());
2734
2735  // Stop the introduced shadowing and count the number of required unlinks.
2736  // After shadowing stops, the original labels are unshadowed and the
2737  // LabelShadows represent the formerly shadowing labels.
2738  int nof_unlinks = 0;
2739  for (int i = 0; i < shadows.length(); i++) {
2740    shadows[i]->StopShadowing();
2741    if (shadows[i]->is_linked()) nof_unlinks++;
2742  }
2743  function_return_is_shadowed_ = function_return_was_shadowed;
2744
2745  // Get an external reference to the handler address.
2746  ExternalReference handler_address(Top::k_handler_address);
2747
2748  // If we can fall off the end of the try block, unlink from the try
2749  // chain and set the state on the frame to FALLING.
2750  if (has_valid_frame()) {
2751    // The next handler address is on top of the frame.
2752    ASSERT(StackHandlerConstants::kNextOffset == 0);
2753    frame_->EmitPop(r1);
2754    __ mov(r3, Operand(handler_address));
2755    __ str(r1, MemOperand(r3));
2756    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2757
2758    // Fake a top of stack value (unneeded when FALLING) and set the
2759    // state in r2, then jump around the unlink blocks if any.
2760    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2761    frame_->EmitPush(r0);
2762    __ mov(r2, Operand(Smi::FromInt(FALLING)));
2763    if (nof_unlinks > 0) {
2764      finally_block.Jump();
2765    }
2766  }
2767
2768  // Generate code to unlink and set the state for the (formerly)
2769  // shadowing targets that have been jumped to.
2770  for (int i = 0; i < shadows.length(); i++) {
2771    if (shadows[i]->is_linked()) {
2772      // If we have come from the shadowed return, the return value is
2773      // in (a non-refcounted reference to) r0.  We must preserve it
2774      // until it is pushed.
2775      //
2776      // Because we can be jumping here (to spilled code) from
2777      // unspilled code, we need to reestablish a spilled frame at
2778      // this block.
2779      shadows[i]->Bind();
2780      frame_->SpillAll();
2781
2782      // Reload sp from the top handler, because some statements that
2783      // we break from (eg, for...in) may have left stuff on the
2784      // stack.
2785      __ mov(r3, Operand(handler_address));
2786      __ ldr(sp, MemOperand(r3));
2787      frame_->Forget(frame_->height() - handler_height);
2788
2789      // Unlink this handler and drop it from the frame.  The next
2790      // handler address is currently on top of the frame.
2791      ASSERT(StackHandlerConstants::kNextOffset == 0);
2792      frame_->EmitPop(r1);
2793      __ str(r1, MemOperand(r3));
2794      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
2795
2796      if (i == kReturnShadowIndex) {
2797        // If this label shadowed the function return, materialize the
2798        // return value on the stack.
2799        frame_->EmitPush(r0);
2800      } else {
2801        // Fake TOS for targets that shadowed breaks and continues.
2802        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2803        frame_->EmitPush(r0);
2804      }
2805      __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
2806      if (--nof_unlinks > 0) {
2807        // If this is not the last unlink block, jump around the next.
2808        finally_block.Jump();
2809      }
2810    }
2811  }
2812
2813  // --- Finally block ---
2814  finally_block.Bind();
2815
2816  // Push the state on the stack.
2817  frame_->EmitPush(r2);
2818
2819  // We keep two elements on the stack - the (possibly faked) result
2820  // and the state - while evaluating the finally block.
2821  //
2822  // Generate code for the statements in the finally block.
2823  VisitStatements(node->finally_block()->statements());
2824
2825  if (has_valid_frame()) {
2826    // Restore state and return value or faked TOS.
2827    frame_->EmitPop(r2);
2828    frame_->EmitPop(r0);
2829  }
2830
2831  // Generate code to jump to the right destination for all used
2832  // formerly shadowing targets.  Deallocate each shadow target.
2833  for (int i = 0; i < shadows.length(); i++) {
2834    if (has_valid_frame() && shadows[i]->is_bound()) {
2835      JumpTarget* original = shadows[i]->other_target();
2836      __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
2837      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
2838        JumpTarget skip;
2839        skip.Branch(ne);
2840        frame_->PrepareForReturn();
2841        original->Jump();
2842        skip.Bind();
2843      } else {
2844        original->Branch(eq);
2845      }
2846    }
2847  }
2848
2849  if (has_valid_frame()) {
2850    // Check if we need to rethrow the exception.
2851    JumpTarget exit;
2852    __ cmp(r2, Operand(Smi::FromInt(THROWING)));
2853    exit.Branch(ne);
2854
2855    // Rethrow exception.
2856    frame_->EmitPush(r0);
2857    frame_->CallRuntime(Runtime::kReThrow, 1);
2858
2859    // Done.
2860    exit.Bind();
2861  }
2862  ASSERT(!has_valid_frame() || frame_->height() == original_height);
2863}
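// Illustrative JS exercising the states above:
//
//   function f() {
//     try { return 1; }        // JUMPING: leaves via the return shadow
//     finally { cleanup(); }   // runs with result and state preserved
//   }
//
// Falling off the end of the try block would set FALLING, and a thrown
// exception sets THROWING, which makes the code above rethrow once the
// finally block has completed.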
2864
2865
2866void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
2867#ifdef DEBUG
2868  int original_height = frame_->height();
2869#endif
2870  Comment cmnt(masm_, "[ DebuggerStatement");
2871  CodeForStatementPosition(node);
2872#ifdef ENABLE_DEBUGGER_SUPPORT
2873  frame_->DebugBreak();
2874#endif
2875  // Ignore the return value.
2876  ASSERT(frame_->height() == original_height);
2877}
2878
2879
2880void CodeGenerator::InstantiateFunction(
2881    Handle<SharedFunctionInfo> function_info) {
2882  // Use the fast case closure allocation code that allocates in new
2883  // space for nested functions that don't need literals cloning.
2884  if (scope()->is_function_scope() && function_info->num_literals() == 0) {
2885    FastNewClosureStub stub;
2886    frame_->EmitPush(Operand(function_info));
2887    frame_->SpillAll();
2888    frame_->CallStub(&stub, 1);
2889    frame_->EmitPush(r0);
2890  } else {
2891    // Create a new closure.
2892    frame_->EmitPush(cp);
2893    frame_->EmitPush(Operand(function_info));
2894    frame_->CallRuntime(Runtime::kNewClosure, 2);
2895    frame_->EmitPush(r0);
2896  }
2897}
2898
2899
2900void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
2901#ifdef DEBUG
2902  int original_height = frame_->height();
2903#endif
2904  Comment cmnt(masm_, "[ FunctionLiteral");
2905
2906  // Build the function info and instantiate it.
2907  Handle<SharedFunctionInfo> function_info =
2908      Compiler::BuildFunctionInfo(node, script(), this);
2909  // Check for stack-overflow exception.
2910  if (HasStackOverflow()) {
2911    ASSERT(frame_->height() == original_height);
2912    return;
2913  }
2914  InstantiateFunction(function_info);
2915  ASSERT_EQ(original_height + 1, frame_->height());
2916}
2917
2918
2919void CodeGenerator::VisitSharedFunctionInfoLiteral(
2920    SharedFunctionInfoLiteral* node) {
2921#ifdef DEBUG
2922  int original_height = frame_->height();
2923#endif
2924  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
2925  InstantiateFunction(node->shared_function_info());
2926  ASSERT_EQ(original_height + 1, frame_->height());
2927}
2928
2929
2930void CodeGenerator::VisitConditional(Conditional* node) {
2931#ifdef DEBUG
2932  int original_height = frame_->height();
2933#endif
2934  VirtualFrame::SpilledScope spilled_scope(frame_);
2935  Comment cmnt(masm_, "[ Conditional");
2936  JumpTarget then;
2937  JumpTarget else_;
2938  LoadCondition(node->condition(), &then, &else_, true);
2939  if (has_valid_frame()) {
2940    Branch(false, &else_);
2941  }
2942  if (has_valid_frame() || then.is_linked()) {
2943    then.Bind();
2944    Load(node->then_expression());
2945  }
2946  if (else_.is_linked()) {
2947    JumpTarget exit;
2948    if (has_valid_frame()) exit.Jump();
2949    else_.Bind();
2950    Load(node->else_expression());
2951    if (exit.is_linked()) exit.Bind();
2952  }
2953  ASSERT_EQ(original_height + 1, frame_->height());
2954}
2955
2956
2957void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
2958  if (slot->type() == Slot::LOOKUP) {
2959    ASSERT(slot->var()->is_dynamic());
2960
2961    // JumpTargets do not yet support merging frames so the frame must be
2962    // spilled when jumping to these targets.
2963    JumpTarget slow;
2964    JumpTarget done;
2965
2966    // Generate fast case for loading from slots that correspond to
2967    // local/global variables or arguments unless they are shadowed by
2968    // eval-introduced bindings.
2969    EmitDynamicLoadFromSlotFastCase(slot,
2970                                    typeof_state,
2971                                    &slow,
2972                                    &done);
2973
2974    slow.Bind();
2975    VirtualFrame::SpilledScope spilled_scope(frame_);
2976    frame_->EmitPush(cp);
2977    __ mov(r0, Operand(slot->var()->name()));
2978    frame_->EmitPush(r0);
2979
2980    if (typeof_state == INSIDE_TYPEOF) {
2981      frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
2982    } else {
2983      frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
2984    }
2985
2986    done.Bind();
2987    frame_->EmitPush(r0);
2988
2989  } else {
2990    Register scratch = VirtualFrame::scratch0();
2991    TypeInfo info = type_info(slot);
2992    frame_->EmitPush(SlotOperand(slot, scratch), info);
2993    if (slot->var()->mode() == Variable::CONST) {
2994      // Const slots may contain 'the hole' value (the constant hasn't been
2995      // initialized yet) which needs to be converted into the 'undefined'
2996      // value.
2997      Comment cmnt(masm_, "[ Unhole const");
2998      frame_->EmitPop(scratch);
2999      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3000      __ cmp(scratch, ip);
3001      __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
3002      frame_->EmitPush(scratch);
3003    }
3004  }
3005}
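// Illustrative JS for the unhole conversion above:
//
//   function f() { var y = c; const c = 1; return y; }   // y: undefined
//
// Reading 'c' before its initialization finds the hole in the const
// slot, which must be converted to undefined rather than leaked to
// user code.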
3006
3007
3008void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
3009                                                  TypeofState state) {
3010  LoadFromSlot(slot, state);
3011
3012  // Bail out quickly if we're not using lazy arguments allocation.
3013  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
3014
3015  // ... or if the slot isn't a non-parameter arguments slot.
3016  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
3017
3018  VirtualFrame::SpilledScope spilled_scope(frame_);
3019
3020  // Load the loaded value from the stack into r0 but leave it on the
3021  // stack.
3022  __ ldr(r0, MemOperand(sp, 0));
3023
3024  // If the loaded value is the sentinel that indicates that we
3025  // haven't loaded the arguments object yet, we need to do it now.
3026  JumpTarget exit;
3027  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3028  __ cmp(r0, ip);
3029  exit.Branch(ne);
3030  frame_->Drop();
3031  StoreArgumentsObject(false);
3032  exit.Bind();
3033}
3034
3035
3036void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
3037  ASSERT(slot != NULL);
3038  if (slot->type() == Slot::LOOKUP) {
3039    VirtualFrame::SpilledScope spilled_scope(frame_);
3040    ASSERT(slot->var()->is_dynamic());
3041
3042    // For now, just do a runtime call.
3043    frame_->EmitPush(cp);
3044    __ mov(r0, Operand(slot->var()->name()));
3045    frame_->EmitPush(r0);
3046
3047    if (init_state == CONST_INIT) {
3048      // Same as the case for a normal store, but ignores attribute
3049      // (e.g. READ_ONLY) of context slot so that we can initialize
3050      // const properties (introduced via eval("const foo = (some
3051      // expr);")). Also, uses the current function context instead of
3052      // the top context.
3053      //
3054      // Note that we must declare the foo upon entry of eval(), via a
3055      // context slot declaration, but we cannot initialize it at the
3056      // same time, because the const declaration may be at the end of
3057      // the eval code (sigh...) and the const variable may have been
3058      // used before (where its value is 'undefined'). Thus, we can only
3059      // do the initialization when we actually encounter the expression
3060      // and when the expression operands are defined and valid, and
3061      // thus we need the split into 2 operations: declaration of the
3062      // context slot followed by initialization.
3063      frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
3064    } else {
3065      frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
3066    }
3067    // Storing a variable must keep the (new) value on the expression
3068    // stack. This is necessary for compiling assignment expressions.
3069    frame_->EmitPush(r0);
3070
3071  } else {
3072    ASSERT(!slot->var()->is_dynamic());
3073    Register scratch = VirtualFrame::scratch0();
3074    VirtualFrame::RegisterAllocationScope scope(this);
3075
3076    // The frame must be spilled when branching to this target.
3077    JumpTarget exit;
3078
3079    if (init_state == CONST_INIT) {
3080      ASSERT(slot->var()->mode() == Variable::CONST);
3081      // Only the first const initialization must be executed (the slot
3082      // still contains 'the hole' value). When the assignment is
3083      // executed, the code is identical to a normal store (see below).
3084      Comment cmnt(masm_, "[ Init const");
3085      __ ldr(scratch, SlotOperand(slot, scratch));
3086      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3087      __ cmp(scratch, ip);
3088      frame_->SpillAll();
3089      exit.Branch(ne);
3090    }
3091
3092    // We must execute the store.  Storing a variable must keep the
3093    // (new) value on the stack. This is necessary for compiling
3094    // assignment expressions.
3095    //
3096    // Note: We will reach here even with slot->var()->mode() ==
3097    // Variable::CONST because of const declarations which will
3098    // initialize consts to 'the hole' value and by doing so, end up
3099    // calling this code.  The scratch register may be loaded with the
3100    // context; it is used below in RecordWrite.
3101    Register tos = frame_->Peek();
3102    __ str(tos, SlotOperand(slot, scratch));
3103    if (slot->type() == Slot::CONTEXT) {
3104      // Skip write barrier if the written value is a smi.
3105      __ tst(tos, Operand(kSmiTagMask));
3106      // We don't use tos any more after here.
3107      VirtualFrame::SpilledScope spilled_scope(frame_);
3108      exit.Branch(eq);
3109      // scratch is loaded with context when calling SlotOperand above.
3110      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
3111      // r1 could be identical to tos, but that doesn't matter.
3112      __ RecordWrite(scratch, Operand(offset), r3, r1);
3113    }
3114    // If we definitely did not jump over the assignment, we do not need
3115    // to bind the exit label.  Doing so can defeat peephole
3116    // optimization.
3117    if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
3118      frame_->SpillAll();
3119      exit.Bind();
3120    }
3121  }
3122}
3123
3124
3125void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
3126                                                      TypeofState typeof_state,
3127                                                      JumpTarget* slow) {
3128  // Check that no extension objects have been created by calls to
3129  // eval from the current scope to the global scope.
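  //
  // Illustrative JavaScript (assumed semantics; not part of the original
  // source): a direct eval anywhere on the chain may add a context
  // extension that shadows the global, so the fast path must be guarded:
  //
  //   var x = 'global';
  //   function outer() {
  //     eval("var x = 'extension'");  // extends outer's context
  //     return function inner() { return x; };
  //   }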
3130  Register tmp = frame_->scratch0();
3131  Register tmp2 = frame_->scratch1();
3132  Register context = cp;
3133  Scope* s = scope();
3134  while (s != NULL) {
3135    if (s->num_heap_slots() > 0) {
3136      if (s->calls_eval()) {
3137        frame_->SpillAll();
3138        // Check that extension is NULL.
3139        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
3140        __ tst(tmp2, tmp2);
3141        slow->Branch(ne);
3142      }
3143      // Load next context in chain.
3144      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
3145      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
3146      context = tmp;
3147    }
3148    // If no outer scope calls eval, we do not need to check more
3149    // context extensions.
3150    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
3151    s = s->outer_scope();
3152  }
3153
3154  if (s->is_eval_scope()) {
3155    frame_->SpillAll();
3156    Label next, fast;
3157    __ Move(tmp, context);
3158    __ bind(&next);
3159    // Terminate at global context.
3160    __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
3161    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
3162    __ cmp(tmp2, ip);
3163    __ b(eq, &fast);
3164    // Check that extension is NULL.
3165    __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
3166    __ tst(tmp2, tmp2);
3167    slow->Branch(ne);
3168    // Load next context in chain.
3169    __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
3170    __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
3171    __ b(&next);
3172    __ bind(&fast);
3173  }
3174
3175  // Load the global object.
3176  LoadGlobal();
3177  // Set up the name register and call the load IC.
3178  frame_->CallLoadIC(slot->var()->name(),
3179                     typeof_state == INSIDE_TYPEOF
3180                         ? RelocInfo::CODE_TARGET
3181                         : RelocInfo::CODE_TARGET_CONTEXT);
3182}
3183
3184
3185void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
3186                                                    TypeofState typeof_state,
3187                                                    JumpTarget* slow,
3188                                                    JumpTarget* done) {
3189  // Generate fast-case code for variables that might be shadowed by
3190  // eval-introduced variables.  Eval is often used without
3191  // introducing any variables.  In those cases, we do not want to
3192  // perform a runtime call for all variables in the scope
3193  // containing the eval.
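  //
  // Illustrative JavaScript (assumed semantics; not part of the original
  // source):
  //
  //   function f(a) {
  //     eval("1 + 1");  // introduces no variables...
  //     return a;       // ...so 'a' can still take the fast path
  //   }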
3194  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
3195    LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
3196    frame_->SpillAll();
3197    done->Jump();
3198
3199  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
3200    frame_->SpillAll();
3201    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
3202    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
3203    if (potential_slot != NULL) {
3204      // Generate fast case for locals that rewrite to slots.
3205      __ ldr(r0,
3206             ContextSlotOperandCheckExtensions(potential_slot,
3207                                               r1,
3208                                               r2,
3209                                               slow));
3210      if (potential_slot->var()->mode() == Variable::CONST) {
3211        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3212        __ cmp(r0, ip);
3213        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
3214      }
3215      done->Jump();
3216    } else if (rewrite != NULL) {
3217      // Generate fast case for argument loads.
3218      Property* property = rewrite->AsProperty();
3219      if (property != NULL) {
3220        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
3221        Literal* key_literal = property->key()->AsLiteral();
3222        if (obj_proxy != NULL &&
3223            key_literal != NULL &&
3224            obj_proxy->IsArguments() &&
3225            key_literal->handle()->IsSmi()) {
3226          // Load the arguments object if there are no eval-introduced
3227          // variables. Then load the argument from the arguments
3228          // object using a keyed load.
3229          __ ldr(r0,
3230                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
3231                                                   r1,
3232                                                   r2,
3233                                                   slow));
3234          frame_->EmitPush(r0);
3235          __ mov(r1, Operand(key_literal->handle()));
3236          frame_->EmitPush(r1);
3237          EmitKeyedLoad();
3238          done->Jump();
3239        }
3240      }
3241    }
3242  }
3243}
3244
3245
3246void CodeGenerator::VisitSlot(Slot* node) {
3247#ifdef DEBUG
3248  int original_height = frame_->height();
3249#endif
3250  Comment cmnt(masm_, "[ Slot");
3251  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
3252  ASSERT_EQ(original_height + 1, frame_->height());
3253}
3254
3255
3256void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
3257#ifdef DEBUG
3258  int original_height = frame_->height();
3259#endif
3260  Comment cmnt(masm_, "[ VariableProxy");
3261
3262  Variable* var = node->var();
3263  Expression* expr = var->rewrite();
3264  if (expr != NULL) {
3265    Visit(expr);
3266  } else {
3267    ASSERT(var->is_global());
3268    Reference ref(this, node);
3269    ref.GetValue();
3270  }
3271  ASSERT_EQ(original_height + 1, frame_->height());
3272}
3273
3274
3275void CodeGenerator::VisitLiteral(Literal* node) {
3276#ifdef DEBUG
3277  int original_height = frame_->height();
3278#endif
3279  Comment cmnt(masm_, "[ Literal");
3280  Register reg = frame_->GetTOSRegister();
3281  bool is_smi = node->handle()->IsSmi();
3282  __ mov(reg, Operand(node->handle()));
3283  frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
3284  ASSERT_EQ(original_height + 1, frame_->height());
3285}
3286
3287
3288void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
3289#ifdef DEBUG
3290  int original_height = frame_->height();
3291#endif
3292  VirtualFrame::SpilledScope spilled_scope(frame_);
3293  Comment cmnt(masm_, "[ RegExp Literal");
3294
3295  // Retrieve the literal array and check the allocated entry.
3296
3297  // Load the function of this activation.
3298  __ ldr(r1, frame_->Function());
3299
3300  // Load the literals array of the function.
3301  __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
3302
3303  // Load the literal at the index saved in the AST node.
3304  int literal_offset =
3305      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
3306  __ ldr(r2, FieldMemOperand(r1, literal_offset));
3307
3308  JumpTarget done;
3309  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3310  __ cmp(r2, ip);
3311  done.Branch(ne);
3312
3313  // If the entry is undefined we call the runtime system to compute
3314  // the literal.
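  //
  // Illustrative JavaScript (assumed semantics; not part of the original
  // source):
  //
  //   var re = /ab+c/gi;  // materialized on first use, then cached in
  //                       // the function's literal array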
3315  frame_->EmitPush(r1);  // literal array  (0)
3316  __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
3317  frame_->EmitPush(r0);  // literal index  (1)
3318  __ mov(r0, Operand(node->pattern()));  // RegExp pattern (2)
3319  frame_->EmitPush(r0);
3320  __ mov(r0, Operand(node->flags()));  // RegExp flags   (3)
3321  frame_->EmitPush(r0);
3322  frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
3323  __ mov(r2, Operand(r0));
3324
3325  done.Bind();
3326  // Push the literal.
3327  frame_->EmitPush(r2);
3328  ASSERT_EQ(original_height + 1, frame_->height());
3329}
3330
3331
3332void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
3333#ifdef DEBUG
3334  int original_height = frame_->height();
3335#endif
3336  VirtualFrame::SpilledScope spilled_scope(frame_);
3337  Comment cmnt(masm_, "[ ObjectLiteral");
3338
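  // Illustrative JavaScript (assumed semantics; not part of the original
  // source) showing the property kinds handled in the loop below:
  //
  //   var o = { a: 1,                   // CONSTANT
  //             b: [1, 2],              // MATERIALIZED_LITERAL
  //             c: f(),                 // COMPUTED
  //             get d() { return 1; },  // GETTER
  //             set d(v) { },           // SETTER
  //             __proto__: p };         // PROTOTYPE
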
3339  // Load the function of this activation.
3340  __ ldr(r3, frame_->Function());
3341  // Literal array.
3342  __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
3343  // Literal index.
3344  __ mov(r2, Operand(Smi::FromInt(node->literal_index())));
3345  // Constant properties.
3346  __ mov(r1, Operand(node->constant_properties()));
3347  // Should the object literal have fast elements?
3348  __ mov(r0, Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
3349  frame_->EmitPushMultiple(4, r3.bit() | r2.bit() | r1.bit() | r0.bit());
3350  if (node->depth() > 1) {
3351    frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
3352  } else {
3353    frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
3354  }
3355  frame_->EmitPush(r0);  // save the result
3356  for (int i = 0; i < node->properties()->length(); i++) {
3357    // At the start of each iteration, the top of stack contains
3358    // the newly created object literal.
3359    ObjectLiteral::Property* property = node->properties()->at(i);
3360    Literal* key = property->key();
3361    Expression* value = property->value();
3362    switch (property->kind()) {
3363      case ObjectLiteral::Property::CONSTANT:
3364        break;
3365      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
3366        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
3367        // else fall through
3368      case ObjectLiteral::Property::COMPUTED:
3369        if (key->handle()->IsSymbol()) {
3370          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
3371          Load(value);
3372          frame_->EmitPop(r0);
3373          __ mov(r2, Operand(key->handle()));
3374          __ ldr(r1, frame_->Top());  // Load the receiver.
3375          frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
3376          break;
3377        }
3378        // else fall through
3379      case ObjectLiteral::Property::PROTOTYPE: {
3380        __ ldr(r0, frame_->Top());
3381        frame_->EmitPush(r0);  // dup the result
3382        Load(key);
3383        Load(value);
3384        frame_->CallRuntime(Runtime::kSetProperty, 3);
3385        break;
3386      }
3387      case ObjectLiteral::Property::SETTER: {
3388        __ ldr(r0, frame_->Top());
3389        frame_->EmitPush(r0);
3390        Load(key);
3391        __ mov(r0, Operand(Smi::FromInt(1)));
3392        frame_->EmitPush(r0);
3393        Load(value);
3394        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
3395        break;
3396      }
3397      case ObjectLiteral::Property::GETTER: {
3398        __ ldr(r0, frame_->Top());
3399        frame_->EmitPush(r0);
3400        Load(key);
3401        __ mov(r0, Operand(Smi::FromInt(0)));
3402        frame_->EmitPush(r0);
3403        Load(value);
3404        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
3405        break;
3406      }
3407    }
3408  }
3409  ASSERT_EQ(original_height + 1, frame_->height());
3410}
3411
3412
3413void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
3414#ifdef DEBUG
3415  int original_height = frame_->height();
3416#endif
3417  VirtualFrame::SpilledScope spilled_scope(frame_);
3418  Comment cmnt(masm_, "[ ArrayLiteral");
3419
3420  // Load the function of this activation.
3421  __ ldr(r2, frame_->Function());
3422  // Load the literals array of the function.
3423  __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
3424  __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
3425  __ mov(r0, Operand(node->constant_elements()));
3426  frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
3427  int length = node->values()->length();
3428  if (node->depth() > 1) {
3429    frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
3430  } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
3431    frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
3432  } else {
3433    FastCloneShallowArrayStub stub(length);
3434    frame_->CallStub(&stub, 3);
3435  }
3436  frame_->EmitPush(r0);  // save the result
3437  // r0: created object literal
3438
3439  // Generate code to set the elements in the array that are not
3440  // literals.
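  //
  // Illustrative JavaScript (assumed semantics; not part of the original
  // source):
  //
  //   var a = [1, 2, f()];  // 1 and 2 come with the boilerplate copy;
  //                         // only f() needs a generated store below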
3441  for (int i = 0; i < node->values()->length(); i++) {
3442    Expression* value = node->values()->at(i);
3443
3444    // If value is a literal the property value is already set in the
3445    // boilerplate object.
3446    if (value->AsLiteral() != NULL) continue;
3447    // If value is a materialized literal the property value is already set
3448    // in the boilerplate object if it is simple.
3449    if (CompileTimeValue::IsCompileTimeValue(value)) continue;
3450
3451    // The property must be set by generated code.
3452    Load(value);
3453    frame_->EmitPop(r0);
3454
3455    // Fetch the array literal.
3456    __ ldr(r1, frame_->Top());
3457    // Get the elements array.
3458    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
3459
3460    // Write to the indexed properties array.
3461    int offset = i * kPointerSize + FixedArray::kHeaderSize;
3462    __ str(r0, FieldMemOperand(r1, offset));
3463
3464    // Update the write barrier for the array address.
3465    __ RecordWrite(r1, Operand(offset), r3, r2);
3466  }
3467  ASSERT_EQ(original_height + 1, frame_->height());
3468}
3469
3470
3471void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
3472#ifdef DEBUG
3473  int original_height = frame_->height();
3474#endif
3475  // Call runtime routine to allocate the catch extension object and
3476  // assign the exception value to the catch variable.
3477  Comment cmnt(masm_, "[ CatchExtensionObject");
3478  Load(node->key());
3479  Load(node->value());
3480  frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
3481  frame_->EmitPush(r0);
3482  ASSERT_EQ(original_height + 1, frame_->height());
3483}
3484
3485
3486void CodeGenerator::EmitSlotAssignment(Assignment* node) {
3487#ifdef DEBUG
3488  int original_height = frame_->height();
3489#endif
3490  Comment cmnt(masm(), "[ Variable Assignment");
3491  Variable* var = node->target()->AsVariableProxy()->AsVariable();
3492  ASSERT(var != NULL);
3493  Slot* slot = var->slot();
3494  ASSERT(slot != NULL);
3495
3496  // Evaluate the right-hand side.
3497  if (node->is_compound()) {
3498    // For a compound assignment the right-hand side is a binary operation
3499    // between the current slot value and the actual right-hand side.
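    //
    // Illustrative JavaScript (assumed semantics; not part of the
    // original source):
    //
    //   x += 1;   // compound: load x, add 1, store back
    //   x = f();  // non-compound: just evaluate and store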
3500    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
3501
3502    // Perform the binary operation.
3503    Literal* literal = node->value()->AsLiteral();
3504    bool overwrite_value =
3505        (node->value()->AsBinaryOperation() != NULL &&
3506         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3507    if (literal != NULL && literal->handle()->IsSmi()) {
3508      SmiOperation(node->binary_op(),
3509                   literal->handle(),
3510                   false,
3511                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3512    } else {
3513      GenerateInlineSmi inline_smi =
3514          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3515      if (literal != NULL) {
3516        ASSERT(!literal->handle()->IsSmi());
3517        inline_smi = DONT_GENERATE_INLINE_SMI;
3518      }
3519      Load(node->value());
3520      GenericBinaryOperation(node->binary_op(),
3521                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3522                             inline_smi);
3523    }
3524  } else {
3525    Load(node->value());
3526  }
3527
3528  // Perform the assignment.
3529  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
3530    CodeForSourcePosition(node->position());
3531    StoreToSlot(slot,
3532                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
3533  }
3534  ASSERT_EQ(original_height + 1, frame_->height());
3535}
3536
3537
3538void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
3539#ifdef DEBUG
3540  int original_height = frame_->height();
3541#endif
3542  Comment cmnt(masm(), "[ Named Property Assignment");
3543  Variable* var = node->target()->AsVariableProxy()->AsVariable();
3544  Property* prop = node->target()->AsProperty();
3545  ASSERT(var == NULL || (prop == NULL && var->is_global()));
3546
3547  // Initialize name and evaluate the receiver sub-expression if necessary. If
3548  // the receiver is trivial it is not placed on the stack at this point, but
3549  // loaded whenever actually needed.
3550  Handle<String> name;
3551  bool is_trivial_receiver = false;
3552  if (var != NULL) {
3553    name = var->name();
3554  } else {
3555    Literal* lit = prop->key()->AsLiteral();
3556    ASSERT_NOT_NULL(lit);
3557    name = Handle<String>::cast(lit->handle());
3558    // Do not materialize the receiver on the frame if it is trivial.
3559    is_trivial_receiver = prop->obj()->IsTrivial();
3560    if (!is_trivial_receiver) Load(prop->obj());
3561  }
3562
3563  // Change to slow case at the beginning of an initialization block to
3564  // avoid the quadratic behavior of repeatedly adding fast properties.
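  //
  // Illustrative JavaScript (the block boundaries below are assumed to be
  // marked by the parser; this sketch is not part of the original source):
  //
  //   this.a = 1;  // starts the initialization block
  //   this.b = 2;
  //   this.c = 3;  // ends the initialization block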
3565  if (node->starts_initialization_block()) {
3566    // An initialization block consists of assignments of the form
3567    // expr.x = ..., so this is never an assignment to a variable and
3568    // there must therefore be a receiver object.
3569    ASSERT_EQ(NULL, var);
3570    if (is_trivial_receiver) {
3571      Load(prop->obj());
3572    } else {
3573      frame_->Dup();
3574    }
3575    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3576  }
3577
3578  // Change to fast case at the end of an initialization block. To prepare for
3579  // that add an extra copy of the receiver to the frame, so that it can be
3580  // converted back to fast case after the assignment.
3581  if (node->ends_initialization_block() && !is_trivial_receiver) {
3582    frame_->Dup();
3583  }
3584
3585  // Stack layout:
3586  // [tos]   : receiver (only materialized if non-trivial)
3587  // [tos+1] : receiver if at the end of an initialization block
3588
3589  // Evaluate the right-hand side.
3590  if (node->is_compound()) {
3591    // For a compound assignment the right-hand side is a binary operation
3592    // between the current property value and the actual right-hand side.
3593    if (is_trivial_receiver) {
3594      Load(prop->obj());
3595    } else if (var != NULL) {
3596      LoadGlobal();
3597    } else {
3598      frame_->Dup();
3599    }
3600    EmitNamedLoad(name, var != NULL);
3601
3602    // Perform the binary operation.
3603    Literal* literal = node->value()->AsLiteral();
3604    bool overwrite_value =
3605        (node->value()->AsBinaryOperation() != NULL &&
3606         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3607    if (literal != NULL && literal->handle()->IsSmi()) {
3608      SmiOperation(node->binary_op(),
3609                   literal->handle(),
3610                   false,
3611                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3612    } else {
3613      GenerateInlineSmi inline_smi =
3614          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3615      if (literal != NULL) {
3616        ASSERT(!literal->handle()->IsSmi());
3617        inline_smi = DONT_GENERATE_INLINE_SMI;
3618      }
3619      Load(node->value());
3620      GenericBinaryOperation(node->binary_op(),
3621                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3622                             inline_smi);
3623    }
3624  } else {
3625    // For non-compound assignment just load the right-hand side.
3626    Load(node->value());
3627  }
3628
3629  // Stack layout:
3630  // [tos]   : value
3631  // [tos+1] : receiver (only materialized if non-trivial)
3632  // [tos+2] : receiver if at the end of an initialization block
3633
3634  // Perform the assignment.  It is safe to ignore constants here.
3635  ASSERT(var == NULL || var->mode() != Variable::CONST);
3636  ASSERT_NE(Token::INIT_CONST, node->op());
3637  if (is_trivial_receiver) {
3638    // Load the receiver and swap with the value.
3639    Load(prop->obj());
3640    Register t0 = frame_->PopToRegister();
3641    Register t1 = frame_->PopToRegister(t0);
3642    frame_->EmitPush(t0);
3643    frame_->EmitPush(t1);
3644  }
3645  CodeForSourcePosition(node->position());
3646  bool is_contextual = (var != NULL);
3647  EmitNamedStore(name, is_contextual);
3648  frame_->EmitPush(r0);
3649
3650  // Change to fast case at the end of an initialization block.
3651  if (node->ends_initialization_block()) {
3652    ASSERT_EQ(NULL, var);
3653    // The argument to the runtime call is the receiver.
3654    if (is_trivial_receiver) {
3655      Load(prop->obj());
3656    } else {
3657      // A copy of the receiver is below the value of the assignment. Swap
3658      // the receiver and the value of the assignment expression.
3659      Register t0 = frame_->PopToRegister();
3660      Register t1 = frame_->PopToRegister(t0);
3661      frame_->EmitPush(t0);
3662      frame_->EmitPush(t1);
3663    }
3664    frame_->CallRuntime(Runtime::kToFastProperties, 1);
3665  }
3666
3667  // Stack layout:
3668  // [tos]   : result
3669
3670  ASSERT_EQ(original_height + 1, frame_->height());
3671}
3672
3673
3674void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
3675#ifdef DEBUG
3676  int original_height = frame_->height();
3677#endif
3678  Comment cmnt(masm_, "[ Keyed Property Assignment");
3679  Property* prop = node->target()->AsProperty();
3680  ASSERT_NOT_NULL(prop);
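  // Illustrative JavaScript (assumed semantics; not part of the original
  // source):
  //
  //   a[i] = v;   // non-compound keyed store
  //   a[i] += v;  // compound: keyed load, add, keyed store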
3681
3682  // Evaluate the receiver subexpression.
3683  Load(prop->obj());
3684
3685  // Change to slow case at the beginning of an initialization block to
3686  // avoid the quadratic behavior of repeatedly adding fast properties.
3687  if (node->starts_initialization_block()) {
3688    frame_->Dup();
3689    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
3690  }
3691
3692  // Change to fast case at the end of an initialization block. To prepare for
3693  // that add an extra copy of the receiver to the frame, so that it can be
3694  // converted back to fast case after the assignment.
3695  if (node->ends_initialization_block()) {
3696    frame_->Dup();
3697  }
3698
3699  // Evaluate the key subexpression.
3700  Load(prop->key());
3701
3702  // Stack layout:
3703  // [tos]   : key
3704  // [tos+1] : receiver
3705  // [tos+2] : receiver if at the end of an initialization block
3706
3707  // Evaluate the right-hand side.
3708  if (node->is_compound()) {
3709    // For a compound assignment the right-hand side is a binary operation
3710    // between the current property value and the actual right-hand side.
3711    // Duplicate receiver and key for loading the current property value.
3712    frame_->Dup2();
3713    EmitKeyedLoad();
3714    frame_->EmitPush(r0);
3715
3716    // Perform the binary operation.
3717    Literal* literal = node->value()->AsLiteral();
3718    bool overwrite_value =
3719        (node->value()->AsBinaryOperation() != NULL &&
3720         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
3721    if (literal != NULL && literal->handle()->IsSmi()) {
3722      SmiOperation(node->binary_op(),
3723                   literal->handle(),
3724                   false,
3725                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
3726    } else {
3727      GenerateInlineSmi inline_smi =
3728          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
3729      if (literal != NULL) {
3730        ASSERT(!literal->handle()->IsSmi());
3731        inline_smi = DONT_GENERATE_INLINE_SMI;
3732      }
3733      Load(node->value());
3734      GenericBinaryOperation(node->binary_op(),
3735                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
3736                             inline_smi);
3737    }
3738  } else {
3739    // For non-compound assignment just load the right-hand side.
3740    Load(node->value());
3741  }
3742
3743  // Stack layout:
3744  // [tos]   : value
3745  // [tos+1] : key
3746  // [tos+2] : receiver
3747  // [tos+3] : receiver if at the end of an initialization block
3748
3749  // Perform the assignment.  It is safe to ignore constants here.
3750  ASSERT(node->op() != Token::INIT_CONST);
3751  CodeForSourcePosition(node->position());
3752  EmitKeyedStore(prop->key()->type());
3753  frame_->EmitPush(r0);
3754
3755  // Stack layout:
3756  // [tos]   : result
3757  // [tos+1] : receiver if at the end of an initialization block
3758
3759  // Change to fast case at the end of an initialization block.
3760  if (node->ends_initialization_block()) {
3761    // The argument to the runtime call is the extra copy of the receiver,
3762    // which is below the value of the assignment.  Swap the receiver and
3763    // the value of the assignment expression.
3764    Register t0 = frame_->PopToRegister();
3765    Register t1 = frame_->PopToRegister(t0);
3766    frame_->EmitPush(t0);
3767    frame_->EmitPush(t1);
3768    frame_->CallRuntime(Runtime::kToFastProperties, 1);
3769  }
3770
3771  // Stack layout:
3772  // [tos]   : result
3773
3774  ASSERT_EQ(original_height + 1, frame_->height());
3775}
3776
3777
3778void CodeGenerator::VisitAssignment(Assignment* node) {
3779  VirtualFrame::RegisterAllocationScope scope(this);
3780#ifdef DEBUG
3781  int original_height = frame_->height();
3782#endif
3783  Comment cmnt(masm_, "[ Assignment");
3784
3785  Variable* var = node->target()->AsVariableProxy()->AsVariable();
3786  Property* prop = node->target()->AsProperty();
3787
3788  if (var != NULL && !var->is_global()) {
3789    EmitSlotAssignment(node);
3790
3791  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
3792             (var != NULL && var->is_global())) {
3793    // Properties whose keys are property names and global variables are
3794    // treated as named property references.  We do not need to consider
3795    // global 'this' because it is not a valid left-hand side.
3796    EmitNamedPropertyAssignment(node);
3797
3798  } else if (prop != NULL) {
3799    // Other properties (including rewritten parameters for a function that
3800    // uses arguments) are keyed property assignments.
3801    EmitKeyedPropertyAssignment(node);
3802
3803  } else {
3804    // Invalid left-hand side.
3805    Load(node->target());
3806    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
3807    // The runtime call doesn't actually return, but the code generator
3808    // will still generate code and expects a certain frame height.
3809    frame_->EmitPush(r0);
3810  }
3811  ASSERT_EQ(original_height + 1, frame_->height());
3812}
3813
3814
3815void CodeGenerator::VisitThrow(Throw* node) {
3816#ifdef DEBUG
3817  int original_height = frame_->height();
3818#endif
3819  Comment cmnt(masm_, "[ Throw");
3820
3821  Load(node->exception());
3822  CodeForSourcePosition(node->position());
3823  frame_->CallRuntime(Runtime::kThrow, 1);
3824  frame_->EmitPush(r0);
3825  ASSERT_EQ(original_height + 1, frame_->height());
3826}
3827
3828
3829void CodeGenerator::VisitProperty(Property* node) {
3830#ifdef DEBUG
3831  int original_height = frame_->height();
3832#endif
3833  Comment cmnt(masm_, "[ Property");
3834
3835  { Reference property(this, node);
3836    property.GetValue();
3837  }
3838  ASSERT_EQ(original_height + 1, frame_->height());
3839}
3840
3841
3842void CodeGenerator::VisitCall(Call* node) {
3843#ifdef DEBUG
3844  int original_height = frame_->height();
3845#endif
3846  Comment cmnt(masm_, "[ Call");
3847
3848  Expression* function = node->expression();
3849  ZoneList<Expression*>* args = node->arguments();
3850
3851  // Standard function call.
3852  // Check if the function is a variable or a property.
3853  Variable* var = function->AsVariableProxy()->AsVariable();
3854  Property* property = function->AsProperty();
3855
3856  // ------------------------------------------------------------------------
3857  // Fast-case: Use inline caching.
3858  // ---
3859  // According to ECMA-262, section 11.2.3, page 44, the function to call
3860  // must be resolved after the arguments have been evaluated. The IC code
3861  // automatically handles this by loading the arguments before the function
3862  // is resolved in cache misses (this also holds for megamorphic calls).
3863  // ------------------------------------------------------------------------
3864
3865  if (var != NULL && var->is_possibly_eval()) {
3866    VirtualFrame::SpilledScope spilled_scope(frame_);
3867    // ----------------------------------
3868    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
3869    // ----------------------------------
3870
3871    // In a call to eval, we first call %ResolvePossiblyDirectEval to
3872    // resolve the function we need to call and the receiver of the
3873    // call.  Then we call the resolved function using the given
3874    // arguments.
3875
3876    // Prepare stack for call to resolved function.
3877    Load(function);
3878
3879    // Allocate a frame slot for the receiver.
3880    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
3881    frame_->EmitPush(r2);
3882
3883    // Load the arguments.
3884    int arg_count = args->length();
3885    for (int i = 0; i < arg_count; i++) {
3886      Load(args->at(i));
3887    }
3888
3889    // If we know that eval can only be shadowed by eval-introduced
3890    // variables we attempt to load the global eval function directly
3891    // in generated code. If we succeed, there is no need to perform a
3892    // context lookup in the runtime system.
3893    JumpTarget done;
3894    if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
3895      ASSERT(var->slot()->type() == Slot::LOOKUP);
3896      JumpTarget slow;
3897      // Prepare the stack for the call to
3898      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
3899      // function, the first argument to the eval call and the
3900      // receiver.
3901      LoadFromGlobalSlotCheckExtensions(var->slot(),
3902                                        NOT_INSIDE_TYPEOF,
3903                                        &slow);
3904      frame_->EmitPush(r0);
3905      if (arg_count > 0) {
3906        __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
3907        frame_->EmitPush(r1);
3908      } else {
3909        frame_->EmitPush(r2);
3910      }
3911      __ ldr(r1, frame_->Receiver());
3912      frame_->EmitPush(r1);
3913
3914      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
3915
3916      done.Jump();
3917      slow.Bind();
3918    }
3919
3920    // Prepare the stack for the call to ResolvePossiblyDirectEval by
3921    // pushing the loaded function, the first argument to the eval
3922    // call and the receiver.
3923    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
3924    frame_->EmitPush(r1);
3925    if (arg_count > 0) {
3926      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
3927      frame_->EmitPush(r1);
3928    } else {
3929      frame_->EmitPush(r2);
3930    }
3931    __ ldr(r1, frame_->Receiver());
3932    frame_->EmitPush(r1);
3933
3934    // Resolve the call.
3935    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
3936
3937    // If we generated fast-case code, bind the jump target where the
3938    // fast and slow cases merge.
3939    if (done.is_linked()) done.Bind();
3940
3941    // Touch up the stack with the right values for the function and receiver.
3942    __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
3943    __ str(r1, MemOperand(sp, arg_count * kPointerSize));
3944
3945    // Call the function.
3946    CodeForSourcePosition(node->position());
3947
3948    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3949    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
3950    frame_->CallStub(&call_function, arg_count + 1);
3951
3952    __ ldr(cp, frame_->Context());
3953    // Remove the function from the stack.
3954    frame_->Drop();
3955    frame_->EmitPush(r0);
3956
3957  } else if (var != NULL && !var->is_this() && var->is_global()) {
3958    // ----------------------------------
3959    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
3960    // ----------------------------------
3961    // Pass the global object as the receiver and let the IC stub
3962    // patch the stack to use the global proxy as 'this' in the
3963    // invoked function.
3964    LoadGlobal();
3965
3966    // Load the arguments.
3967    int arg_count = args->length();
3968    for (int i = 0; i < arg_count; i++) {
3969      Load(args->at(i));
3970    }
3971
3972    VirtualFrame::SpilledScope spilled_scope(frame_);
3973    // Set up the name register and call the IC initialization code.
3974    __ mov(r2, Operand(var->name()));
3975    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
3976    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
3977    CodeForSourcePosition(node->position());
3978    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
3979                           arg_count + 1);
3980    __ ldr(cp, frame_->Context());
3981    frame_->EmitPush(r0);
3982
3983  } else if (var != NULL && var->slot() != NULL &&
3984             var->slot()->type() == Slot::LOOKUP) {
3985    VirtualFrame::SpilledScope spilled_scope(frame_);
3986    // ----------------------------------
3987    // JavaScript examples:
3988    //
3989    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
3990    //
3991    //  function f() {};
3992    //  function g() {
3993    //    eval(...);
3994    //    f();  // f could be in extension object.
3995    //  }
3996    // ----------------------------------
3997
3998    // JumpTargets do not yet support merging frames so the frame must be
3999    // spilled when jumping to these targets.
4000    JumpTarget slow, done;
4001
4002    // Generate fast case for loading functions from slots that
4003    // correspond to local/global variables or arguments unless they
4004    // are shadowed by eval-introduced bindings.
4005    EmitDynamicLoadFromSlotFastCase(var->slot(),
4006                                    NOT_INSIDE_TYPEOF,
4007                                    &slow,
4008                                    &done);
4009
4010    slow.Bind();
4011    // Load the function.
4012    frame_->EmitPush(cp);
4013    __ mov(r0, Operand(var->name()));
4014    frame_->EmitPush(r0);
4015    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
4016    // r0: slot value; r1: receiver
4017
4018    // Load the receiver.
4019    frame_->EmitPush(r0);  // function
4020    frame_->EmitPush(r1);  // receiver
4021
4022    // If fast case code has been generated, emit code to push the
4023    // function and receiver and have the slow path jump around this
4024    // code.
4025    if (done.is_linked()) {
4026      JumpTarget call;
4027      call.Jump();
4028      done.Bind();
4029      frame_->EmitPush(r0);  // function
4030      LoadGlobalReceiver(r1);  // receiver
4031      call.Bind();
4032    }
4033
4034    // Call the function. At this point, everything is spilled but the
4035    // function and receiver are in r0 and r1.
4036    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
4037    frame_->EmitPush(r0);
4038
4039  } else if (property != NULL) {
4040    // Check if the key is a literal string.
4041    Literal* literal = property->key()->AsLiteral();
4042
4043    if (literal != NULL && literal->handle()->IsSymbol()) {
4044      // ------------------------------------------------------------------
4045      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
4046      // ------------------------------------------------------------------
4047
4048      Handle<String> name = Handle<String>::cast(literal->handle());
4049
4050      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
4051          name->IsEqualTo(CStrVector("apply")) &&
4052          args->length() == 2 &&
4053          args->at(1)->AsVariableProxy() != NULL &&
4054          args->at(1)->AsVariableProxy()->IsArguments()) {
4055        // Use the optimized Function.prototype.apply path that avoids
4056        // materializing the lazily allocated arguments object.
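        //
        // Illustrative JavaScript (assumed semantics; not part of the
        // original source):
        //
        //   function f() {
        //     return g.apply(this, arguments);  // 'arguments' is never
        //   }                                   // materialized here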
4057        CallApplyLazy(property->obj(),
4058                      args->at(0),
4059                      args->at(1)->AsVariableProxy(),
4060                      node->position());
4061
4062      } else {
4063        Load(property->obj());  // Receiver.
4064        // Load the arguments.
4065        int arg_count = args->length();
4066        for (int i = 0; i < arg_count; i++) {
4067          Load(args->at(i));
4068        }
4069
4070        VirtualFrame::SpilledScope spilled_scope(frame_);
4071        // Set the name register and call the IC initialization code.
4072        __ mov(r2, Operand(name));
4073        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4074        Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
4075        CodeForSourcePosition(node->position());
4076        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
4077        __ ldr(cp, frame_->Context());
4078        frame_->EmitPush(r0);
4079      }
4080
4081    } else {
4082      // -------------------------------------------
4083      // JavaScript example: 'array[index](1, 2, 3)'
4084      // -------------------------------------------
4085      VirtualFrame::SpilledScope spilled_scope(frame_);
4086
4087      Load(property->obj());
4088      if (property->is_synthetic()) {
4089        Load(property->key());
4090        EmitKeyedLoad();
4091        // Put the function below the receiver: push the function
4092        // first, then push the global receiver on top of it.
4093        frame_->EmitPush(r0);  // Function.
4094        LoadGlobalReceiver(r0);
4095        // Call the function.
4096        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
4097        frame_->EmitPush(r0);
4098      } else {
4099        // Load the arguments.
4100        int arg_count = args->length();
4101        for (int i = 0; i < arg_count; i++) {
4102          Load(args->at(i));
4103        }
4104
4105        // Set the name register and call the IC initialization code.
4106        Load(property->key());
4107        frame_->EmitPop(r2);  // Function name.
4108
4109        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
4110        Handle<Code> stub = ComputeKeyedCallInitialize(arg_count, in_loop);
4111        CodeForSourcePosition(node->position());
4112        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
4113        __ ldr(cp, frame_->Context());
4114        frame_->EmitPush(r0);
4115      }
4116    }
4117
4118  } else {
4119    // ----------------------------------
4120    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
4121    // ----------------------------------
4122
4123    // Load the function.
4124    Load(function);
4125
4126    VirtualFrame::SpilledScope spilled_scope(frame_);
4127
4128    // Pass the global proxy as the receiver.
4129    LoadGlobalReceiver(r0);
4130
4131    // Call the function.
4132    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
4133    frame_->EmitPush(r0);
4134  }
4135  ASSERT_EQ(original_height + 1, frame_->height());
4136}
4137
4138
4139void CodeGenerator::VisitCallNew(CallNew* node) {
4140#ifdef DEBUG
4141  int original_height = frame_->height();
4142#endif
4143  Comment cmnt(masm_, "[ CallNew");
4144
4145  // According to ECMA-262, section 11.2.2, page 44, the function
4146  // expression in new calls must be evaluated before the
4147  // arguments. This is different from ordinary calls, where the
4148  // actual function to call is resolved after the arguments have been
4149  // evaluated.
4150
4151  // Compute function to call and use the global object as the
4152  // receiver. There is no need to use the global proxy here because
4153  // it will always be replaced with a newly allocated object.
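  //
  // Illustrative JavaScript (assumed semantics; not part of the original
  // source):
  //
  //   new Foo(1, 2, 3);  // Foo is evaluated before 1, 2 and 3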
4154  Load(node->expression());
4155  LoadGlobal();
4156
4157  // Push the arguments ("left-to-right") on the stack.
4158  ZoneList<Expression*>* args = node->arguments();
4159  int arg_count = args->length();
4160  for (int i = 0; i < arg_count; i++) {
4161    Load(args->at(i));
4162  }
4163
4164  VirtualFrame::SpilledScope spilled_scope(frame_);
4165
4166  // r0: the number of arguments.
4167  __ mov(r0, Operand(arg_count));
4168  // Load the function into r1 as per calling convention.
4169  __ ldr(r1, frame_->ElementAt(arg_count + 1));
4170
4171  // Call the construct call builtin that handles allocation and
4172  // constructor invocation.
4173  CodeForSourcePosition(node->position());
4174  Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
4175  frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
4176
4177  // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
4178  __ str(r0, frame_->Top());
4179  ASSERT_EQ(original_height + 1, frame_->height());
4180}
4181
4182
4183void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
4184  VirtualFrame::SpilledScope spilled_scope(frame_);
4185  ASSERT(args->length() == 1);
4186  JumpTarget leave, null, function, non_function_constructor;
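  // Illustrative results (assumed behavior of the %_ClassOf intrinsic
  // implemented here; not part of the original source):
  //
  //   %_ClassOf([])             -> 'Array'
  //   %_ClassOf(function() {})  -> 'Function'
  //   %_ClassOf({})             -> 'Object'
  //   %_ClassOf(1)              -> null  (smis and other non-JS objects)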
4187
4188  // Load the object into r0.
4189  Load(args->at(0));
4190  frame_->EmitPop(r0);
4191
4192  // If the object is a smi, we return null.
4193  __ tst(r0, Operand(kSmiTagMask));
4194  null.Branch(eq);
4195
4196  // Check that the object is a JS object but take special care of JS
4197  // functions to make sure they have 'Function' as their class.
4198  __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
4199  null.Branch(lt);
4200
4201  // As long as JS_FUNCTION_TYPE is the last instance type and it is
4202  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4203  // LAST_JS_OBJECT_TYPE.
4204  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4205  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
4206  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
4207  function.Branch(eq);
4208
4209  // Check if the constructor in the map is a function.
4210  __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
4211  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
4212  non_function_constructor.Branch(ne);
4213
4214  // The r0 register now contains the constructor function. Grab the
4215  // instance class name from there.
4216  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
4217  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
4218  frame_->EmitPush(r0);
4219  leave.Jump();
4220
4221  // Functions have class 'Function'.
4222  function.Bind();
4223  __ mov(r0, Operand(Factory::function_class_symbol()));
4224  frame_->EmitPush(r0);
4225  leave.Jump();
4226
4227  // Objects with a non-function constructor have class 'Object'.
4228  non_function_constructor.Bind();
4229  __ mov(r0, Operand(Factory::Object_symbol()));
4230  frame_->EmitPush(r0);
4231  leave.Jump();
4232
4233  // Non-JS objects have class null.
4234  null.Bind();
4235  __ LoadRoot(r0, Heap::kNullValueRootIndex);
4236  frame_->EmitPush(r0);
4237
4238  // All done.
4239  leave.Bind();
4240}
4241
4242
4243void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
4244  VirtualFrame::SpilledScope spilled_scope(frame_);
4245  ASSERT(args->length() == 1);
4246  JumpTarget leave;
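  // Illustrative results (assumed behavior of the %_ValueOf intrinsic
  // implemented here; not part of the original source):
  //
  //   %_ValueOf(new Number(42))  -> 42
  //   %_ValueOf(42)              -> 42  (non-JSValue inputs pass through)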
4247  Load(args->at(0));
4248  frame_->EmitPop(r0);  // r0 contains object.
4249  // if (object->IsSmi()) return the object.
4250  __ tst(r0, Operand(kSmiTagMask));
4251  leave.Branch(eq);
4252  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
4253  __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
4254  leave.Branch(ne);
4255  // Load the value.
4256  __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
4257  leave.Bind();
4258  frame_->EmitPush(r0);
4259}
4260
4261
4262void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
4263  VirtualFrame::SpilledScope spilled_scope(frame_);
4264  ASSERT(args->length() == 2);
4265  JumpTarget leave;
4266  Load(args->at(0));    // Load the object.
4267  Load(args->at(1));    // Load the value.
4268  frame_->EmitPop(r0);  // r0 contains value
4269  frame_->EmitPop(r1);  // r1 contains object
4270  // if (object->IsSmi()) return object.
4271  __ tst(r1, Operand(kSmiTagMask));
4272  leave.Branch(eq);
4273  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
4274  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
4275  leave.Branch(ne);
4276  // Store the value.
4277  __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
4278  // Update the write barrier.
4279  __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
4280  // Leave.
4281  leave.Bind();
4282  frame_->EmitPush(r0);
4283}
4284
4285
4286void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
4287  ASSERT(args->length() == 1);
4288  Load(args->at(0));
4289  Register reg = frame_->PopToRegister();
4290  __ tst(reg, Operand(kSmiTagMask));
4291  cc_reg_ = eq;
4292}
4293
4294
4295void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
4296  // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
4297  ASSERT_EQ(args->length(), 3);
4298#ifdef ENABLE_LOGGING_AND_PROFILING
4299  if (ShouldGenerateLog(args->at(0))) {
4300    Load(args->at(1));
4301    Load(args->at(2));
4302    frame_->CallRuntime(Runtime::kLog, 2);
4303  }
4304#endif
4305  frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
4306}
4307
4308
4309void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
4310  ASSERT(args->length() == 1);
4311  Load(args->at(0));
4312  Register reg = frame_->PopToRegister();
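  // A value is a non-negative smi iff both its smi tag bit and its sign
  // bit (0x80000000) are clear, so a single tst covers both (assumed smi
  // layout: 31-bit payload shifted left by one, tag bit zero).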
4313  __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
4314  cc_reg_ = eq;
4315}
4316
4317
4318// Generates the Math.pow method - currently just calls runtime.
4319void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
4320  ASSERT(args->length() == 2);
4321  Load(args->at(0));
4322  Load(args->at(1));
4323  frame_->CallRuntime(Runtime::kMath_pow, 2);
4324  frame_->EmitPush(r0);
4325}
4326
4327
4328// Generates the Math.sqrt method - currently just calls runtime.
4329void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
4330  ASSERT(args->length() == 1);
4331  Load(args->at(0));
4332  frame_->CallRuntime(Runtime::kMath_sqrt, 1);
4333  frame_->EmitPush(r0);
4334}
4335
4336
4337class DeferredStringCharCodeAt : public DeferredCode {
4338 public:
4339  DeferredStringCharCodeAt(Register object,
4340                           Register index,
4341                           Register scratch,
4342                           Register result)
4343      : result_(result),
4344        char_code_at_generator_(object,
4345                                index,
4346                                scratch,
4347                                result,
4348                                &need_conversion_,
4349                                &need_conversion_,
4350                                &index_out_of_range_,
4351                                STRING_INDEX_IS_NUMBER) {}
4352
4353  StringCharCodeAtGenerator* fast_case_generator() {
4354    return &char_code_at_generator_;
4355  }
4356
4357  virtual void Generate() {
4358    VirtualFrameRuntimeCallHelper call_helper(frame_state());
4359    char_code_at_generator_.GenerateSlow(masm(), call_helper);
4360
4361    __ bind(&need_conversion_);
4362    // Move the undefined value into the result register, which will
4363    // trigger conversion.
4364    __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
4365    __ jmp(exit_label());
4366
4367    __ bind(&index_out_of_range_);
4368    // When the index is out of range, the spec requires us to return
4369    // NaN.
4370    __ LoadRoot(result_, Heap::kNanValueRootIndex);
4371    __ jmp(exit_label());
4372  }
4373
4374 private:
4375  Register result_;
4376
4377  Label need_conversion_;
4378  Label index_out_of_range_;
4379
4380  StringCharCodeAtGenerator char_code_at_generator_;
4381};
4382
4383
4384// This generates code that performs a String.prototype.charCodeAt() call
4385// or returns undefined in order to trigger conversion.
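//
// Illustrative JavaScript (assumed semantics; not part of the original
// source):
//
//   'abc'.charCodeAt(1)  -> 98
//   'abc'.charCodeAt(9)  -> NaN  (index out of range)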
4386void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
4387  VirtualFrame::SpilledScope spilled_scope(frame_);
4388  Comment(masm_, "[ GenerateStringCharCodeAt");
4389  ASSERT(args->length() == 2);
4390
4391  Load(args->at(0));
4392  Load(args->at(1));
4393
4394  Register index = r1;
4395  Register object = r2;
4396
4397  frame_->EmitPop(index);
4398  frame_->EmitPop(object);
4399
4400  // We need two extra registers.
4401  Register scratch = r3;
4402  Register result = r0;
4403
4404  DeferredStringCharCodeAt* deferred =
4405      new DeferredStringCharCodeAt(object,
4406                                   index,
4407                                   scratch,
4408                                   result);
4409  deferred->fast_case_generator()->GenerateFast(masm_);
4410  deferred->BindExit();
4411  frame_->EmitPush(result);
4412}
4413
4414
4415class DeferredStringCharFromCode : public DeferredCode {
4416 public:
4417  DeferredStringCharFromCode(Register code,
4418                             Register result)
4419      : char_from_code_generator_(code, result) {}
4420
4421  StringCharFromCodeGenerator* fast_case_generator() {
4422    return &char_from_code_generator_;
4423  }
4424
4425  virtual void Generate() {
4426    VirtualFrameRuntimeCallHelper call_helper(frame_state());
4427    char_from_code_generator_.GenerateSlow(masm(), call_helper);
4428  }
4429
4430 private:
4431  StringCharFromCodeGenerator char_from_code_generator_;
4432};
4433
4434
4435// Generates code for creating a one-char string from a char code.
4436void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
4437  VirtualFrame::SpilledScope spilled_scope(frame_);
4438  Comment(masm_, "[ GenerateStringCharFromCode");
4439  ASSERT(args->length() == 1);
4440
4441  Load(args->at(0));
4442
4443  Register code = r1;
4444  Register result = r0;
4445
4446  frame_->EmitPop(code);
4447
4448  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
4449      code, result);
4450  deferred->fast_case_generator()->GenerateFast(masm_);
4451  deferred->BindExit();
4452  frame_->EmitPush(result);
4453}
4454
4455
4456class DeferredStringCharAt : public DeferredCode {
4457 public:
4458  DeferredStringCharAt(Register object,
4459                       Register index,
4460                       Register scratch1,
4461                       Register scratch2,
4462                       Register result)
4463      : result_(result),
4464        char_at_generator_(object,
4465                           index,
4466                           scratch1,
4467                           scratch2,
4468                           result,
4469                           &need_conversion_,
4470                           &need_conversion_,
4471                           &index_out_of_range_,
4472                           STRING_INDEX_IS_NUMBER) {}
4473
4474  StringCharAtGenerator* fast_case_generator() {
4475    return &char_at_generator_;
4476  }
4477
4478  virtual void Generate() {
4479    VirtualFrameRuntimeCallHelper call_helper(frame_state());
4480    char_at_generator_.GenerateSlow(masm(), call_helper);
4481
4482    __ bind(&need_conversion_);
4483    // Move smi zero into the result register, which will trigger
4484    // conversion.
4485    __ mov(result_, Operand(Smi::FromInt(0)));
4486    __ jmp(exit_label());
4487
4488    __ bind(&index_out_of_range_);
4489    // When the index is out of range, the spec requires us to return
4490    // the empty string.
4491    __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
4492    __ jmp(exit_label());
4493  }
4494
4495 private:
4496  Register result_;
4497
4498  Label need_conversion_;
4499  Label index_out_of_range_;
4500
4501  StringCharAtGenerator char_at_generator_;
4502};
4503
4504
4505// This generates code that performs a String.prototype.charAt() call
4506// or returns a smi in order to trigger conversion.
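//
// Illustrative JavaScript (assumed semantics; not part of the original
// source):
//
//   'abc'.charAt(1)  -> 'b'
//   'abc'.charAt(9)  -> ''  (out-of-range index yields the empty string)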
4507void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
4508  VirtualFrame::SpilledScope spilled_scope(frame_);
4509  Comment(masm_, "[ GenerateStringCharAt");
4510  ASSERT(args->length() == 2);
4511
4512  Load(args->at(0));
4513  Load(args->at(1));
4514
4515  Register index = r1;
4516  Register object = r2;
4517
4518  frame_->EmitPop(index);
4519  frame_->EmitPop(object);
4520
4521  // We need three extra registers.
4522  Register scratch1 = r3;
4523  Register scratch2 = r4;
4524  Register result = r0;
4525
4526  DeferredStringCharAt* deferred =
4527      new DeferredStringCharAt(object,
4528                               index,
4529                               scratch1,
4530                               scratch2,
4531                               result);
4532  deferred->fast_case_generator()->GenerateFast(masm_);
4533  deferred->BindExit();
4534  frame_->EmitPush(result);
4535}
4536
4537
4538void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
4539  ASSERT(args->length() == 1);
4540  Load(args->at(0));
4541  JumpTarget answer;
4542  // We need the CC bits to come out as not_equal in the case where the
4543  // object is a smi.  This can't be done with the usual test opcode so
4544  // we use XOR to get the right CC bits.
4545  Register possible_array = frame_->PopToRegister();
4546  Register scratch = VirtualFrame::scratch0();
4547  __ and_(scratch, possible_array, Operand(kSmiTagMask));
4548  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
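  // For a smi the AND clears all bits, so the XOR yields kSmiTagMask
  // (non-zero) and the flags are 'ne': the branch skips the map check and
  // the final 'eq' test reads false.  For a heap object the XOR yields
  // zero and execution falls through to the map check.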
4549  answer.Branch(ne);
4550  // It is a heap object - get the map. Check if the object is a JS array.
4551  __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
4552  answer.Bind();
4553  cc_reg_ = eq;
4554}
4555
4556
4557void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
4558  ASSERT(args->length() == 1);
4559  Load(args->at(0));
4560  JumpTarget answer;
4561  // We need the CC bits to come out as not_equal in the case where the
4562  // object is a smi.  This can't be done with the usual test opcode so
4563  // we use XOR to get the right CC bits.
4564  Register possible_regexp = frame_->PopToRegister();
4565  Register scratch = VirtualFrame::scratch0();
4566  __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
4567  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
4568  answer.Branch(ne);
4569  // It is a heap object - get the map. Check if the object is a regexp.
4570  __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
4571  answer.Bind();
4572  cc_reg_ = eq;
4573}
4574
4575
4576void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
4577  // This generates a fast version of:
4578  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
4579  ASSERT(args->length() == 1);
4580  Load(args->at(0));
4581  Register possible_object = frame_->PopToRegister();
4582  __ tst(possible_object, Operand(kSmiTagMask));
4583  false_target()->Branch(eq);
4584
4585  __ LoadRoot(ip, Heap::kNullValueRootIndex);
4586  __ cmp(possible_object, ip);
4587  true_target()->Branch(eq);
4588
4589  Register map_reg = VirtualFrame::scratch0();
4590  __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
4591  // Undetectable objects behave like undefined when tested with typeof.
4592  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
4593  __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
4594  false_target()->Branch(ne);
4595
4596  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
4597  __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
4598  false_target()->Branch(lt);
4599  __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
4600  cc_reg_ = le;
4601}
4602
4603
4604void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
4605  // This generates a fast version of:
4606  // (%_ClassOf(arg) === 'Function')
4607  ASSERT(args->length() == 1);
4608  Load(args->at(0));
4609  Register possible_function = frame_->PopToRegister();
4610  __ tst(possible_function, Operand(kSmiTagMask));
4611  false_target()->Branch(eq);
4612  Register map_reg = VirtualFrame::scratch0();
4613  Register scratch = VirtualFrame::scratch1();
4614  __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
4615  cc_reg_ = eq;
4616}
4617
4618
4619void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
4620  ASSERT(args->length() == 1);
4621  Load(args->at(0));
4622  Register possible_undetectable = frame_->PopToRegister();
4623  __ tst(possible_undetectable, Operand(kSmiTagMask));
4624  false_target()->Branch(eq);
4625  Register scratch = VirtualFrame::scratch0();
4626  __ ldr(scratch,
4627         FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
4628  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
4629  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
4630  cc_reg_ = ne;
4631}
4632
4633
4634void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
4635  ASSERT(args->length() == 0);
4636
4637  Register scratch0 = VirtualFrame::scratch0();
4638  Register scratch1 = VirtualFrame::scratch1();
4639  // Get the frame pointer for the calling frame.
4640  __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4641
4642  // Skip the arguments adaptor frame if it exists.
4643  __ ldr(scratch1,
4644         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
4645  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4646  __ ldr(scratch0,
4647         MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
4648
4649  // Check the marker in the calling frame.
4650  __ ldr(scratch1,
4651         MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
4652  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
4653  cc_reg_ = eq;
4654}
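// A minimal standalone sketch of the frame walk above (illustrative only;
// the helper and the slot constants are hypothetical, not V8 API):
//
//   static bool CalledAsConstructor(intptr_t* fp) {
//     // One frame up; skip an arguments adaptor frame if present.
//     intptr_t* caller = reinterpret_cast<intptr_t*>(fp[kCallerFPSlot]);
//     if (caller[kContextSlot] == kArgumentsAdaptorMarker) {
//       caller = reinterpret_cast<intptr_t*>(caller[kCallerFPSlot]);
//     }
//     // The construct marker is stored as a smi in the marker slot.
//     return caller[kMarkerSlot] == kConstructMarker;
//   }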
4655
4656
4657void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
4658  ASSERT(args->length() == 0);
4659
4660  Register tos = frame_->GetTOSRegister();
4661  Register scratch0 = VirtualFrame::scratch0();
4662  Register scratch1 = VirtualFrame::scratch1();
4663
4664  // Check if the calling frame is an arguments adaptor frame.
4665  __ ldr(scratch0,
4666         MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4667  __ ldr(scratch1,
4668         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
4669  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4670
4671  // Get the number of formal parameters.
4672  __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
4673
4674  // Arguments adaptor case: Read the arguments length from the
4675  // adaptor frame.
4676  __ ldr(tos,
4677         MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
4678         eq);
4679
4680  frame_->EmitPush(tos);
4681}
4682
4683
4684void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
4685  VirtualFrame::SpilledScope spilled_scope(frame_);
4686  ASSERT(args->length() == 1);
4687
4688  // Satisfy contract with ArgumentsAccessStub:
4689  // Load the key into r1 and the formal parameters count into r0.
4690  Load(args->at(0));
4691  frame_->EmitPop(r1);
4692  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
4693
4694  // Call the shared stub to get to arguments[key].
4695  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
4696  frame_->CallStub(&stub, 0);
4697  frame_->EmitPush(r0);
4698}
4699
4700
4701void CodeGenerator::GenerateRandomHeapNumber(
4702    ZoneList<Expression*>* args) {
4703  VirtualFrame::SpilledScope spilled_scope(frame_);
4704  ASSERT(args->length() == 0);
4705
4706  Label slow_allocate_heapnumber;
4707  Label heapnumber_allocated;
4708
4709  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
4710  __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
4711  __ jmp(&heapnumber_allocated);
4712
4713  __ bind(&slow_allocate_heapnumber);
4714  // To allocate a heap number and ensure that it is not a smi, we
4715  // call Runtime::kNumberUnaryMinus on 0, which returns the double
4716  // -0.0.  A new, distinct heap number is returned each time.
4717  __ mov(r0, Operand(Smi::FromInt(0)));
4718  __ push(r0);
4719  __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
4720  __ mov(r4, Operand(r0));
4721
4722  __ bind(&heapnumber_allocated);
4723
4724  // Convert 32 random bits in r0 to 0.(32 random bits) in a double
4725  // by computing:
4726  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
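  // A standalone sketch of that computation in plain C++ (illustrative
  // only; the helper name is hypothetical):
  //
  //   #include <stdint.h>
  //   #include <string.h>
  //
  //   static double RandomBitsToDouble(uint32_t random_bits) {
  //     uint64_t bits =
  //         (static_cast<uint64_t>(0x41300000) << 32) | random_bits;
  //     double d;
  //     memcpy(&d, &bits, sizeof(d));  // 1.(random bits) x 2^20.
  //     return d - 1048576.0;          // Minus 1.0 x 2^20 leaves [0, 1).
  //   }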
4727  if (CpuFeatures::IsSupported(VFP3)) {
4728    __ PrepareCallCFunction(0, r1);
4729    __ CallCFunction(ExternalReference::random_uint32_function(), 0);
4730
4731    CpuFeatures::Scope scope(VFP3);
4732    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
4733    // Create this constant using mov/orr to avoid PC relative load.
4734    __ mov(r1, Operand(0x41000000));
4735    __ orr(r1, r1, Operand(0x300000));
4736    // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
4737    __ vmov(d7, r0, r1);
4738    // Move 0x4130000000000000 to VFP.
4739    __ mov(r0, Operand(0));
4740    __ vmov(d8, r0, r1);
4741    // Subtract and store the result in the heap number.
4742    __ vsub(d7, d7, d8);
4743    __ sub(r0, r4, Operand(kHeapObjectTag));
4744    __ vstr(d7, r0, HeapNumber::kValueOffset);
4745    frame_->EmitPush(r4);
4746  } else {
4747    __ mov(r0, Operand(r4));
4748    __ PrepareCallCFunction(1, r1);
4749    __ CallCFunction(
4750        ExternalReference::fill_heap_number_with_random_function(), 1);
4751    frame_->EmitPush(r0);
4752  }
4753}
4754
4755
4756void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
4757  ASSERT_EQ(2, args->length());
4758
4759  Load(args->at(0));
4760  Load(args->at(1));
4761
4762  StringAddStub stub(NO_STRING_ADD_FLAGS);
4763  frame_->SpillAll();
4764  frame_->CallStub(&stub, 2);
4765  frame_->EmitPush(r0);
4766}
4767
4768
4769void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
4770  ASSERT_EQ(3, args->length());
4771
4772  Load(args->at(0));
4773  Load(args->at(1));
4774  Load(args->at(2));
4775
4776  SubStringStub stub;
4777  frame_->SpillAll();
4778  frame_->CallStub(&stub, 3);
4779  frame_->EmitPush(r0);
4780}
4781
4782
4783void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
4784  ASSERT_EQ(2, args->length());
4785
4786  Load(args->at(0));
4787  Load(args->at(1));
4788
4789  StringCompareStub stub;
4790  frame_->SpillAll();
4791  frame_->CallStub(&stub, 2);
4792  frame_->EmitPush(r0);
4793}
4794
4795
4796void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
4797  ASSERT_EQ(4, args->length());
4798
4799  Load(args->at(0));
4800  Load(args->at(1));
4801  Load(args->at(2));
4802  Load(args->at(3));
4803  RegExpExecStub stub;
4804  frame_->SpillAll();
4805  frame_->CallStub(&stub, 4);
4806  frame_->EmitPush(r0);
4807}
4808
4809
4810void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
4811  // No stub. This code only occurs a few times in regexp.js.
4812  const int kMaxInlineLength = 100;
4813  ASSERT_EQ(3, args->length());
4814  Load(args->at(0));  // Size of array, smi.
4815  Load(args->at(1));  // "index" property value.
4816  Load(args->at(2));  // "input" property value.
4817  {
4818    VirtualFrame::SpilledScope spilled_scope(frame_);
4819    Label slowcase;
4820    Label done;
4821    __ ldr(r1, MemOperand(sp, kPointerSize * 2));
4822    STATIC_ASSERT(kSmiTag == 0);
4823    STATIC_ASSERT(kSmiTagSize == 1);
4824    __ tst(r1, Operand(kSmiTagMask));
4825    __ b(ne, &slowcase);
4826    __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
4827    __ b(hi, &slowcase);
4828    // Smi-tagging is equivalent to multiplying by 2.
4829    // Allocate RegExpResult followed by FixedArray (total words in r2).
4830    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
4831    // Elements:  [Map][Length][..elements..]
4832    // Size of JSArray with two in-object properties and the header of a
4833    // FixedArray.
4834    int objects_size =
4835        (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
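    // With the usual 32-bit layout this comes to (24 + 8) / 4 = 8 words,
    // assuming JSRegExpResult::kSize covers map, properties, elements,
    // length, index and input (6 pointers) and the FixedArray header
    // covers map and length (2 pointers).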
4836    __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
4837    __ add(r2, r5, Operand(objects_size));
4838    __ AllocateInNewSpace(
4839        r2,  // In: Size, in words.
4840        r0,  // Out: Start of allocation (tagged).
4841        r3,  // Scratch register.
4842        r4,  // Scratch register.
4843        &slowcase,
4844        static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
4845    // r0: Start of allocated area, object-tagged.
4846    // r1: Number of elements in array, as smi.
4847    // r5: Number of elements, untagged.
4848
4849    // Set JSArray map to global.regexp_result_map().
4850    // Set empty properties FixedArray.
4851    // Set elements to point to FixedArray allocated right after the JSArray.
4852    // Interleave operations for better latency.
4853    __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
4854    __ add(r3, r0, Operand(JSRegExpResult::kSize));
4855    __ mov(r4, Operand(Factory::empty_fixed_array()));
4856    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
4857    __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
4858    __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
4859    __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
4860    __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4861
4862    // Set input, index and length fields from arguments.
4863    __ ldm(ia_w, sp, static_cast<RegList>(r2.bit() | r4.bit()));
4864    __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
4865    __ add(sp, sp, Operand(kPointerSize));
4866    __ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
4867    __ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
4868
4869    // Fill out the elements FixedArray.
4870    // r0: JSArray, tagged.
4871    // r3: FixedArray, tagged.
4872    // r5: Number of elements in array, untagged.
4873
4874    // Set map.
4875    __ mov(r2, Operand(Factory::fixed_array_map()));
4876    __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
4877    // Set FixedArray length.
4878    __ mov(r6, Operand(r5, LSL, kSmiTagSize));
4879    __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
4880    // Fill contents of the fixed array with the hole value.
4881    __ mov(r2, Operand(Factory::the_hole_value()));
4882    __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4883    // Fill fixed array elements with hole.
4884    // r0: JSArray, tagged.
4885    // r2: the hole.
4886    // r3: Start of elements in FixedArray.
4887    // r5: Number of elements to fill.
4888    Label loop;
4889    __ tst(r5, Operand(r5));
4890    __ bind(&loop);
4891    __ b(le, &done);  // Jump if r5 is negative or zero.
4892    __ sub(r5, r5, Operand(1), SetCC);
4893    __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
4894    __ jmp(&loop);
4895
4896    __ bind(&slowcase);
4897    __ CallRuntime(Runtime::kRegExpConstructResult, 3);
4898
4899    __ bind(&done);
4900  }
4901  frame_->Forget(3);
4902  frame_->EmitPush(r0);
4903}
4904
4905
4906class DeferredSearchCache: public DeferredCode {
4907 public:
4908  DeferredSearchCache(Register dst, Register cache, Register key)
4909      : dst_(dst), cache_(cache), key_(key) {
4910    set_comment("[ DeferredSearchCache");
4911  }
4912
4913  virtual void Generate();
4914
4915 private:
4916  Register dst_, cache_, key_;
4917};
4918
4919
4920void DeferredSearchCache::Generate() {
4921  __ Push(cache_, key_);
4922  __ CallRuntime(Runtime::kGetFromCache, 2);
4923  if (!dst_.is(r0)) {
4924    __ mov(dst_, r0);
4925  }
4926}
4927
4928
4929void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
4930  ASSERT_EQ(2, args->length());
4931
4932  ASSERT_NE(NULL, args->at(0)->AsLiteral());
4933  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
4934
4935  Handle<FixedArray> jsfunction_result_caches(
4936      Top::global_context()->jsfunction_result_caches());
4937  if (jsfunction_result_caches->length() <= cache_id) {
4938    __ Abort("Attempt to use undefined cache.");
4939    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
4940    return;
4941  }
4942
4943  Load(args->at(1));
4944
4945  VirtualFrame::SpilledScope spilled_scope(frame_);
4946
4947  frame_->EmitPop(r2);
4948
4949  __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
4950  __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
4951  __ ldr(r1, ContextOperand(r1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
4952  __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(cache_id)));
4953
4954  DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);
4955
4956  const int kFingerOffset =
4957      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
4958  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
4959  __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
4960  // r0 now holds finger offset as a smi.
4961  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4962  // r3 now points to the start of fixed array elements.
4963  __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
4964  // Note side effect of PreIndex: r3 now points to the key of the pair.
4965  __ cmp(r2, r0);
4966  deferred->Branch(ne);
4967
4968  __ ldr(r0, MemOperand(r3, kPointerSize));
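  // A plain C++ sketch of the probe implemented by the compare and load
  // above (illustrative only; the helper name is hypothetical):
  //
  //   static bool ProbeCacheAtFinger(intptr_t* elements, int finger_index,
  //                                  intptr_t key, intptr_t* value_out) {
  //     if (elements[finger_index] != key) return false;  // Slow case.
  //     *value_out = elements[finger_index + 1];  // Value follows its key.
  //     return true;
  //   }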
4969
4970  deferred->BindExit();
4971  frame_->EmitPush(r0);
4972}
4973
4974
4975void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
4976  ASSERT_EQ(args->length(), 1);
4977
4978  // Load the argument on the stack and jump to the runtime.
4979  Load(args->at(0));
4980
4981  NumberToStringStub stub;
4982  frame_->SpillAll();
4983  frame_->CallStub(&stub, 1);
4984  frame_->EmitPush(r0);
4985}
4986
4987
4988class DeferredSwapElements: public DeferredCode {
4989 public:
4990  DeferredSwapElements(Register object, Register index1, Register index2)
4991      : object_(object), index1_(index1), index2_(index2) {
4992    set_comment("[ DeferredSwapElements");
4993  }
4994
4995  virtual void Generate();
4996
4997 private:
4998  Register object_, index1_, index2_;
4999};
5000
5001
5002void DeferredSwapElements::Generate() {
5003  __ push(object_);
5004  __ push(index1_);
5005  __ push(index2_);
5006  __ CallRuntime(Runtime::kSwapElements, 3);
5007}
5008
5009
5010void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
5011  Comment cmnt(masm_, "[ GenerateSwapElements");
5012
5013  ASSERT_EQ(3, args->length());
5014
5015  Load(args->at(0));
5016  Load(args->at(1));
5017  Load(args->at(2));
5018
5019  VirtualFrame::SpilledScope spilled_scope(frame_);
5020
5021  Register index2 = r2;
5022  Register index1 = r1;
5023  Register object = r0;
5024  Register tmp1 = r3;
5025  Register tmp2 = r4;
5026
5027  frame_->EmitPop(index2);
5028  frame_->EmitPop(index1);
5029  frame_->EmitPop(object);
5030
5031  DeferredSwapElements* deferred =
5032      new DeferredSwapElements(object, index1, index2);
5033
5034  // Fetch the map and check if array is in fast case.
5035  // Check that object doesn't require security checks and
5036  // has no indexed interceptor.
5037  __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
5038  deferred->Branch(lt);
5039  __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
5040  __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
5041  deferred->Branch(nz);
5042
5043  // Check the object's elements are in fast case.
5044  __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
5045  __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
5046  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
5047  __ cmp(tmp2, ip);
5048  deferred->Branch(ne);
5049
5050  // Smi-tagging is equivalent to multiplying by 2.
5051  STATIC_ASSERT(kSmiTag == 0);
5052  STATIC_ASSERT(kSmiTagSize == 1);
5053
5054  // Check that both indices are smis.
5055  __ mov(tmp2, index1);
5056  __ orr(tmp2, tmp2, index2);
5057  __ tst(tmp2, Operand(kSmiTagMask));
5058  deferred->Branch(nz);
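  // Worked example: index1 = 6 (the smi 3) and index2 = 5 (a heap pointer,
  // low bit set) OR to 7; tst against kSmiTagMask (1) is non-zero, so the
  // deferred branch is taken.  Two smis always OR to an even value and pass.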
5059
5060  // Bring the offsets into the fixed array in tmp1 into index1 and
5061  // index2.
5062  __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5063  __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
5064  __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
5065
5066  // Swap elements.
5067  Register tmp3 = object;
5068  object = no_reg;
5069  __ ldr(tmp3, MemOperand(tmp1, index1));
5070  __ ldr(tmp2, MemOperand(tmp1, index2));
5071  __ str(tmp3, MemOperand(tmp1, index2));
5072  __ str(tmp2, MemOperand(tmp1, index1));
5073
5074  Label done;
5075  __ InNewSpace(tmp1, tmp2, eq, &done);
5076  // Possible optimization: check that both stored values are smis
5077  // (OR them together and test the result against the smi tag mask).
5078
5079  __ mov(tmp2, tmp1);
5080  RecordWriteStub recordWrite1(tmp1, index1, tmp3);
5081  __ CallStub(&recordWrite1);
5082
5083  RecordWriteStub recordWrite2(tmp2, index2, tmp3);
5084  __ CallStub(&recordWrite2);
5085
5086  __ bind(&done);
5087
5088  deferred->BindExit();
5089  __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
5090  frame_->EmitPush(tmp1);
5091}
5092
5093
5094void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
5095  Comment cmnt(masm_, "[ GenerateCallFunction");
5096
5097  ASSERT(args->length() >= 2);
5098
5099  int n_args = args->length() - 2;  // for receiver and function.
5100  Load(args->at(0));  // receiver
5101  for (int i = 0; i < n_args; i++) {
5102    Load(args->at(i + 1));
5103  }
5104  Load(args->at(n_args + 1));  // function
5105  frame_->CallJSFunction(n_args);
5106  frame_->EmitPush(r0);
5107}
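// Usage note: the corresponding intrinsic call has the shape
// %_CallFunction(receiver, arg1, ..., argN, function) -- the function to
// invoke comes last, after the receiver and the N arguments.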
5108
5109
5110void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
5111  ASSERT_EQ(args->length(), 1);
5112  Load(args->at(0));
5113  if (CpuFeatures::IsSupported(VFP3)) {
5114    TranscendentalCacheStub stub(TranscendentalCache::SIN);
5115    frame_->SpillAllButCopyTOSToR0();
5116    frame_->CallStub(&stub, 1);
5117  } else {
5118    frame_->CallRuntime(Runtime::kMath_sin, 1);
5119  }
5120  frame_->EmitPush(r0);
5121}
5122
5123
5124void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
5125  ASSERT_EQ(args->length(), 1);
5126  Load(args->at(0));
5127  if (CpuFeatures::IsSupported(VFP3)) {
5128    TranscendentalCacheStub stub(TranscendentalCache::COS);
5129    frame_->SpillAllButCopyTOSToR0();
5130    frame_->CallStub(&stub, 1);
5131  } else {
5132    frame_->CallRuntime(Runtime::kMath_cos, 1);
5133  }
5134  frame_->EmitPush(r0);
5135}
5136
5137
5138void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
5139  ASSERT(args->length() == 2);
5140
5141  // Load the two objects into registers and perform the comparison.
5142  Load(args->at(0));
5143  Load(args->at(1));
5144  Register lhs = frame_->PopToRegister();
5145  Register rhs = frame_->PopToRegister(lhs);
5146  __ cmp(lhs, rhs);
5147  cc_reg_ = eq;
5148}
5149
5150
5151void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
5152#ifdef DEBUG
5153  int original_height = frame_->height();
5154#endif
5155  if (CheckForInlineRuntimeCall(node)) {
5156    ASSERT((has_cc() && frame_->height() == original_height) ||
5157           (!has_cc() && frame_->height() == original_height + 1));
5158    return;
5159  }
5160
5161  ZoneList<Expression*>* args = node->arguments();
5162  Comment cmnt(masm_, "[ CallRuntime");
5163  Runtime::Function* function = node->function();
5164
5165  if (function == NULL) {
5166    // Prepare stack for calling JS runtime function.
5167    // Push the builtins object found in the current global object.
5168    Register scratch = VirtualFrame::scratch0();
5169    __ ldr(scratch, GlobalObject());
5170    Register builtins = frame_->GetTOSRegister();
5171    __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
5172    frame_->EmitPush(builtins);
5173  }
5174
5175  // Push the arguments ("left-to-right").
5176  int arg_count = args->length();
5177  for (int i = 0; i < arg_count; i++) {
5178    Load(args->at(i));
5179  }
5180
5181  VirtualFrame::SpilledScope spilled_scope(frame_);
5182
5183  if (function == NULL) {
5184    // Call the JS runtime function.
5185    __ mov(r2, Operand(node->name()));
5186    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
5187    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
5188    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
5189    __ ldr(cp, frame_->Context());
5190    frame_->EmitPush(r0);
5191  } else {
5192    // Call the C runtime function.
5193    frame_->CallRuntime(function, arg_count);
5194    frame_->EmitPush(r0);
5195  }
5196  ASSERT_EQ(original_height + 1, frame_->height());
5197}
5198
5199
5200void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
5201#ifdef DEBUG
5202  int original_height = frame_->height();
5203#endif
5204  VirtualFrame::SpilledScope spilled_scope(frame_);
5205  Comment cmnt(masm_, "[ UnaryOperation");
5206
5207  Token::Value op = node->op();
5208
5209  if (op == Token::NOT) {
5210    LoadCondition(node->expression(), false_target(), true_target(), true);
5211    // LoadCondition may (and usually does) leave a test and branch to
5212    // be emitted by the caller.  In that case, negate the condition.
5213    if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
5214
5215  } else if (op == Token::DELETE) {
5216    Property* property = node->expression()->AsProperty();
5217    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
5218    if (property != NULL) {
5219      Load(property->obj());
5220      Load(property->key());
5221      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
5222      frame_->EmitPush(r0);
5223
5224    } else if (variable != NULL) {
5225      Slot* slot = variable->slot();
5226      if (variable->is_global()) {
5227        LoadGlobal();
5228        frame_->EmitPush(Operand(variable->name()));
5229        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
5230        frame_->EmitPush(r0);
5231
5232      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
5233        // Look up the context holding the named variable.
5234        frame_->EmitPush(cp);
5235        frame_->EmitPush(Operand(variable->name()));
5236        frame_->CallRuntime(Runtime::kLookupContext, 2);
5237        // r0: context
5238        frame_->EmitPush(r0);
5239        frame_->EmitPush(Operand(variable->name()));
5240        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
5241        frame_->EmitPush(r0);
5242
5243      } else {
5244        // Default: Result of deleting non-global, not dynamically
5245        // introduced variables is false.
5246        frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
5247      }
5248
5249    } else {
5250      // Default: Result of deleting expressions is true.
5251      Load(node->expression());  // may have side-effects
5252      frame_->Drop();
5253      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
5254    }
5255
5256  } else if (op == Token::TYPEOF) {
5257    // Special case for loading the typeof expression; see comment on
5258    // LoadTypeofExpression().
5259    LoadTypeofExpression(node->expression());
5260    frame_->CallRuntime(Runtime::kTypeof, 1);
5261    frame_->EmitPush(r0);  // r0 has result
5262
5263  } else {
5264    bool overwrite =
5265        (node->expression()->AsBinaryOperation() != NULL &&
5266         node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
5267    Load(node->expression());
5268    switch (op) {
5269      case Token::NOT:
5270      case Token::DELETE:
5271      case Token::TYPEOF:
5272        UNREACHABLE();  // handled above
5273        break;
5274
5275      case Token::SUB: {
5276        VirtualFrame::SpilledScope spilled(frame_);
5277        frame_->EmitPop(r0);
5278        GenericUnaryOpStub stub(Token::SUB, overwrite);
5279        frame_->CallStub(&stub, 0);
5280        frame_->EmitPush(r0);  // r0 has result
5281        break;
5282      }
5283
5284      case Token::BIT_NOT: {
5285        // smi check
5286        VirtualFrame::SpilledScope spilled(frame_);
5287        frame_->EmitPop(r0);
5288        JumpTarget smi_label;
5289        JumpTarget continue_label;
5290        __ tst(r0, Operand(kSmiTagMask));
5291        smi_label.Branch(eq);
5292
5293        GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
5294        frame_->CallStub(&stub, 0);
5295        continue_label.Jump();
5296
5297        smi_label.Bind();
5298        __ mvn(r0, Operand(r0));
5299        __ bic(r0, r0, Operand(kSmiTagMask));  // bit-clear inverted smi-tag
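        // Worked example: a smi encodes n as n << 1.  mvn yields
        // ~(n << 1) = -(n << 1) - 1, whose low (tag) bit is set; clearing
        // it with bic leaves (~n) << 1, the smi encoding of ~n.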
5300        continue_label.Bind();
5301        frame_->EmitPush(r0);  // r0 has result
5302        break;
5303      }
5304
5305      case Token::VOID:
5306        frame_->Drop();
5307        frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
5308        break;
5309
5310      case Token::ADD: {
5311        VirtualFrame::SpilledScope spilled(frame_);
5312        frame_->EmitPop(r0);
5313        // Smi check.
5314        JumpTarget continue_label;
5315        __ tst(r0, Operand(kSmiTagMask));
5316        continue_label.Branch(eq);
5317        frame_->EmitPush(r0);
5318        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
5319        continue_label.Bind();
5320        frame_->EmitPush(r0);  // r0 has result
5321        break;
5322      }
5323      default:
5324        UNREACHABLE();
5325    }
5326  }
5327  ASSERT(!has_valid_frame() ||
5328         (has_cc() && frame_->height() == original_height) ||
5329         (!has_cc() && frame_->height() == original_height + 1));
5330}
5331
5332
5333void CodeGenerator::VisitCountOperation(CountOperation* node) {
5334#ifdef DEBUG
5335  int original_height = frame_->height();
5336#endif
5337  Comment cmnt(masm_, "[ CountOperation");
5338
5339  bool is_postfix = node->is_postfix();
5340  bool is_increment = node->op() == Token::INC;
5341
5342  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
5343  bool is_const = (var != NULL && var->mode() == Variable::CONST);
5344  bool is_slot = (var != NULL && var->mode() == Variable::VAR);
5345
5346  if (!is_const && is_slot && type_info(var->slot()).IsSmi()) {
5347    // The type info declares that this variable is always a Smi.  That
5348    // means it is a Smi both before and after the increment/decrement.
5349    // Let's make use of that to generate minimal count code.
5350    Reference target(this, node->expression(), !is_const);
5351    ASSERT(!target.is_illegal());
5352    target.GetValue();  // Pushes the value.
5353    Register value = frame_->PopToRegister();
5354    if (is_postfix) frame_->EmitPush(value);
5355    if (is_increment) {
5356      __ add(value, value, Operand(Smi::FromInt(1)));
5357    } else {
5358      __ sub(value, value, Operand(Smi::FromInt(1)));
5359    }
5360    frame_->EmitPush(value);
5361    target.SetValue(NOT_CONST_INIT);
5362    if (is_postfix) frame_->Pop();
5363    ASSERT_EQ(original_height + 1, frame_->height());
5364    return;
5365  }
5366
5367  // If it's a postfix expression and its result is not ignored and the
5368  // reference is non-trivial, then push a placeholder on the stack now
5369  // to hold the result of the expression.
5370  bool placeholder_pushed = false;
5371  if (!is_slot && is_postfix) {
5372    frame_->EmitPush(Operand(Smi::FromInt(0)));
5373    placeholder_pushed = true;
5374  }
5375
5376  // A constant reference is not saved to, so a constant reference is not a
5377  // compound assignment reference.
5378  { Reference target(this, node->expression(), !is_const);
5379    if (target.is_illegal()) {
5380      // Spoof the virtual frame to have the expected height (one higher
5381      // than on entry).
5382      if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
5383      ASSERT_EQ(original_height + 1, frame_->height());
5384      return;
5385    }
5386
5387    // This pushes 0, 1 or 2 words onto the stack to be used later when
5388    // updating the target.  It also pushes the current value of the target.
5389    target.GetValue();
5390
5391    JumpTarget slow;
5392    JumpTarget exit;
5393
5394    Register value = frame_->PopToRegister();
5395
5396    // Postfix: Store the old value as the result.
5397    if (placeholder_pushed) {
5398      frame_->SetElementAt(value, target.size());
5399    } else if (is_postfix) {
5400      frame_->EmitPush(value);
5401      __ mov(VirtualFrame::scratch0(), value);
5402      value = VirtualFrame::scratch0();
5403    }
5404
5405    // Check for smi operand.
5406    __ tst(value, Operand(kSmiTagMask));
5407    slow.Branch(ne);
5408
5409    // Perform optimistic increment/decrement.
5410    if (is_increment) {
5411      __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
5412    } else {
5413      __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
5414    }
5415
5416    // If the increment/decrement didn't overflow, we're done.
5417    exit.Branch(vc);
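    // Example: with 31-bit smis the largest smi is encoded as 0x7ffffffe;
    // adding the encoded 1 (the machine value 2) overflows to 0x80000000
    // and sets the V flag, so the add is reverted and the slow path taken.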
5418
5419    // Revert optimistic increment/decrement.
5420    if (is_increment) {
5421      __ sub(value, value, Operand(Smi::FromInt(1)));
5422    } else {
5423      __ add(value, value, Operand(Smi::FromInt(1)));
5424    }
5425
5426    // Slow case: Convert to number.  At this point the
5427    // value to be incremented is in the value register.
5428    slow.Bind();
5429
5430    // Convert the operand to a number.
5431    frame_->EmitPush(value);
5432
5433    {
5434      VirtualFrame::SpilledScope spilled(frame_);
5435      frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
5436
5437      if (is_postfix) {
5438        // Postfix: store to result (on the stack).
5439        __ str(r0, frame_->ElementAt(target.size()));
5440      }
5441
5442      // Compute the new value.
5443      frame_->EmitPush(r0);
5444      frame_->EmitPush(Operand(Smi::FromInt(1)));
5445      if (is_increment) {
5446        frame_->CallRuntime(Runtime::kNumberAdd, 2);
5447      } else {
5448        frame_->CallRuntime(Runtime::kNumberSub, 2);
5449      }
5450    }
5451
5452    __ Move(value, r0);
5453    // Store the new value in the target if not const.
5454    // At this point the answer is in the value register.
5455    exit.Bind();
5456    frame_->EmitPush(value);
5457    // Set the target with the result, leaving the result on
5458    // top of the stack.  Removes the target from the stack if
5459    // it has a non-zero size.
5460    if (!is_const) target.SetValue(NOT_CONST_INIT);
5461  }
5462
5463  // Postfix: Discard the new value and use the old.
5464  if (is_postfix) frame_->Pop();
5465  ASSERT_EQ(original_height + 1, frame_->height());
5466}
5467
5468
5469void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
5470  // According to ECMA-262 section 11.11, page 58, the binary logical
5471  // operators must yield the result of one of the two expressions
5472  // before any ToBoolean() conversions. This means that the value
5473  // produced by a && or || operator is not necessarily a boolean.
5474
5475  // NOTE: If the left hand side produces a materialized value (not in
5476  // the CC register), we force the right hand side to do the
5477  // same. This is necessary because we may have to branch to the exit
5478  // after evaluating the left hand side (due to the shortcut
5479  // semantics), but the compiler must (statically) know if the result
5480  // of compiling the binary operation is materialized or not.
5481  VirtualFrame::SpilledScope spilled_scope(frame_);
5482  if (node->op() == Token::AND) {
5483    JumpTarget is_true;
5484    LoadCondition(node->left(), &is_true, false_target(), false);
5485    if (has_valid_frame() && !has_cc()) {
5486      // The left-hand side result is on top of the virtual frame.
5487      JumpTarget pop_and_continue;
5488      JumpTarget exit;
5489
5490      frame_->Dup();
5491      // Avoid popping the result if it converts to 'false' using the
5492      // standard ToBoolean() conversion as described in ECMA-262,
5493      // section 9.2, page 30.
5494      ToBoolean(&pop_and_continue, &exit);
5495      Branch(false, &exit);
5496
5497      // Pop the result of evaluating the first part.
5498      pop_and_continue.Bind();
5499      frame_->Pop();
5500
5501      // Evaluate right side expression.
5502      is_true.Bind();
5503      Load(node->right());
5504
5505      // Exit (always with a materialized value).
5506      exit.Bind();
5507    } else if (has_cc() || is_true.is_linked()) {
5508      // The left-hand side is either (a) partially compiled to
5509      // control flow with a final branch left to emit or (b) fully
5510      // compiled to control flow and possibly true.
5511      if (has_cc()) {
5512        Branch(false, false_target());
5513      }
5514      is_true.Bind();
5515      LoadCondition(node->right(), true_target(), false_target(), false);
5516    } else {
5517      // Nothing to do.
5518      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
5519    }
5520
5521  } else {
5522    ASSERT(node->op() == Token::OR);
5523    JumpTarget is_false;
5524    LoadCondition(node->left(), true_target(), &is_false, false);
5525    if (has_valid_frame() && !has_cc()) {
5526      // The left-hand side result is on top of the virtual frame.
5527      JumpTarget pop_and_continue;
5528      JumpTarget exit;
5529
5530      frame_->Dup();
5531      // Avoid popping the result if it converts to 'true' using the
5532      // standard ToBoolean() conversion as described in ECMA-262,
5533      // section 9.2, page 30.
5534      ToBoolean(&exit, &pop_and_continue);
5535      Branch(true, &exit);
5536
5537      // Pop the result of evaluating the first part.
5538      pop_and_continue.Bind();
5539      frame_->Pop();
5540
5541      // Evaluate right side expression.
5542      is_false.Bind();
5543      Load(node->right());
5544
5545      // Exit (always with a materialized value).
5546      exit.Bind();
5547    } else if (has_cc() || is_false.is_linked()) {
5548      // The left-hand side is either (a) partially compiled to
5549      // control flow with a final branch left to emit or (b) fully
5550      // compiled to control flow and possibly false.
5551      if (has_cc()) {
5552        Branch(true, true_target());
5553      }
5554      is_false.Bind();
5555      LoadCondition(node->right(), true_target(), false_target(), false);
5556    } else {
5557      // Nothing to do.
5558      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
5559    }
5560  }
5561}
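// For example, in '0 && f()' the left-hand value 0 converts to false, so
// 0 itself (not the boolean false) is the result and f() is never
// evaluated; the Dup()/Pop() pairs above keep that value on the frame.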
5562
5563
5564void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
5565#ifdef DEBUG
5566  int original_height = frame_->height();
5567#endif
5568  Comment cmnt(masm_, "[ BinaryOperation");
5569
5570  if (node->op() == Token::AND || node->op() == Token::OR) {
5571    GenerateLogicalBooleanOperation(node);
5572  } else {
5573    // Optimize for the case where (at least) one of the expressions
5574    // is a literal small integer.
5575    Literal* lliteral = node->left()->AsLiteral();
5576    Literal* rliteral = node->right()->AsLiteral();
5577    // NOTE: The code below assumes that the slow cases (calls to runtime)
5578    // never return a constant/immutable object.
5579    bool overwrite_left =
5580        (node->left()->AsBinaryOperation() != NULL &&
5581         node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
5582    bool overwrite_right =
5583        (node->right()->AsBinaryOperation() != NULL &&
5584         node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
5585
5586    if (rliteral != NULL && rliteral->handle()->IsSmi()) {
5587      VirtualFrame::RegisterAllocationScope scope(this);
5588      Load(node->left());
5589      if (frame_->KnownSmiAt(0)) overwrite_left = false;
5590      SmiOperation(node->op(),
5591                   rliteral->handle(),
5592                   false,
5593                   overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
5594    } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
5595      VirtualFrame::RegisterAllocationScope scope(this);
5596      Load(node->right());
5597      if (frame_->KnownSmiAt(0)) overwrite_right = false;
5598      SmiOperation(node->op(),
5599                   lliteral->handle(),
5600                   true,
5601                   overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
5602    } else {
5603      GenerateInlineSmi inline_smi =
5604          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
5605      if (lliteral != NULL) {
5606        ASSERT(!lliteral->handle()->IsSmi());
5607        inline_smi = DONT_GENERATE_INLINE_SMI;
5608      }
5609      if (rliteral != NULL) {
5610        ASSERT(!rliteral->handle()->IsSmi());
5611        inline_smi = DONT_GENERATE_INLINE_SMI;
5612      }
5613      VirtualFrame::RegisterAllocationScope scope(this);
5614      OverwriteMode overwrite_mode = NO_OVERWRITE;
5615      if (overwrite_left) {
5616        overwrite_mode = OVERWRITE_LEFT;
5617      } else if (overwrite_right) {
5618        overwrite_mode = OVERWRITE_RIGHT;
5619      }
5620      Load(node->left());
5621      Load(node->right());
5622      GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
5623    }
5624  }
5625  ASSERT(!has_valid_frame() ||
5626         (has_cc() && frame_->height() == original_height) ||
5627         (!has_cc() && frame_->height() == original_height + 1));
5628}
5629
5630
5631void CodeGenerator::VisitThisFunction(ThisFunction* node) {
5632#ifdef DEBUG
5633  int original_height = frame_->height();
5634#endif
5635  frame_->EmitPush(MemOperand(frame_->Function()));
5636  ASSERT_EQ(original_height + 1, frame_->height());
5637}
5638
5639
5640void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
5641#ifdef DEBUG
5642  int original_height = frame_->height();
5643#endif
5644  Comment cmnt(masm_, "[ CompareOperation");
5645
5646  VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
5647
5648  // Get the expressions from the node.
5649  Expression* left = node->left();
5650  Expression* right = node->right();
5651  Token::Value op = node->op();
5652
5653  // To make null checks efficient, we check if either left or right is the
5654  // literal 'null'. If so, we optimize the code by inlining a null check
5655  // instead of calling the (very) general runtime routine for checking
5656  // equality.
5657  if (op == Token::EQ || op == Token::EQ_STRICT) {
5658    bool left_is_null =
5659        left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
5660    bool right_is_null =
5661        right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
5662    // The 'null' value can only be equal to 'null' or 'undefined'.
5663    if (left_is_null || right_is_null) {
5664      Load(left_is_null ? right : left);
5665      Register tos = frame_->PopToRegister();
5666      // JumpTargets can't cope with register allocation yet.
5667      frame_->SpillAll();
5668      __ LoadRoot(ip, Heap::kNullValueRootIndex);
5669      __ cmp(tos, ip);
5670
5671      // The 'null' value is only equal to 'undefined' if using non-strict
5672      // comparisons.
5673      if (op != Token::EQ_STRICT) {
5674        true_target()->Branch(eq);
5675
5676        __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5677        __ cmp(tos, ip);
5678        true_target()->Branch(eq);
5679
5680        __ tst(tos, Operand(kSmiTagMask));
5681        false_target()->Branch(eq);
5682
5683        // It can be an undetectable object.
5684        __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
5685        __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
5686        __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
5687        __ cmp(tos, Operand(1 << Map::kIsUndetectable));
5688      }
5689
5690      cc_reg_ = eq;
5691      ASSERT(has_cc() && frame_->height() == original_height);
5692      return;
5693    }
5694  }
5695
5696  // To make typeof testing for natives implemented in JavaScript really
5697  // efficient, we generate special code for expressions of the form:
5698  // 'typeof <expression> == <string>'.
5699  UnaryOperation* operation = left->AsUnaryOperation();
5700  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
5701      (operation != NULL && operation->op() == Token::TYPEOF) &&
5702      (right->AsLiteral() != NULL &&
5703       right->AsLiteral()->handle()->IsString())) {
5704    Handle<String> check(String::cast(*right->AsLiteral()->handle()));
5705
5706    // Load the operand, move it to a register.
5707    LoadTypeofExpression(operation->expression());
5708    Register tos = frame_->PopToRegister();
5709
5710    // JumpTargets can't cope with register allocation yet.
5711    frame_->SpillAll();
5712
5713    Register scratch = VirtualFrame::scratch0();
5714
5715    if (check->Equals(Heap::number_symbol())) {
5716      __ tst(tos, Operand(kSmiTagMask));
5717      true_target()->Branch(eq);
5718      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
5719      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5720      __ cmp(tos, ip);
5721      cc_reg_ = eq;
5722
5723    } else if (check->Equals(Heap::string_symbol())) {
5724      __ tst(tos, Operand(kSmiTagMask));
5725      false_target()->Branch(eq);
5726
5727      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
5728
5729      // It can be an undetectable string object.
5730      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
5731      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
5732      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
5733      false_target()->Branch(eq);
5734
5735      __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
5736      __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
5737      cc_reg_ = lt;
5738
5739    } else if (check->Equals(Heap::boolean_symbol())) {
5740      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
5741      __ cmp(tos, ip);
5742      true_target()->Branch(eq);
5743      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
5744      __ cmp(tos, ip);
5745      cc_reg_ = eq;
5746
5747    } else if (check->Equals(Heap::undefined_symbol())) {
5748      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5749      __ cmp(tos, ip);
5750      true_target()->Branch(eq);
5751
5752      __ tst(tos, Operand(kSmiTagMask));
5753      false_target()->Branch(eq);
5754
5755      // It can be an undetectable object.
5756      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
5757      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
5758      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
5759      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
5760
5761      cc_reg_ = eq;
5762
5763    } else if (check->Equals(Heap::function_symbol())) {
5764      __ tst(tos, Operand(kSmiTagMask));
5765      false_target()->Branch(eq);
5766      Register map_reg = scratch;
5767      __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
5768      true_target()->Branch(eq);
5769      // Regular expressions are callable so typeof == 'function'.
5770      __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
5771      cc_reg_ = eq;
5772
5773    } else if (check->Equals(Heap::object_symbol())) {
5774      __ tst(tos, Operand(kSmiTagMask));
5775      false_target()->Branch(eq);
5776
5777      __ LoadRoot(ip, Heap::kNullValueRootIndex);
5778      __ cmp(tos, ip);
5779      true_target()->Branch(eq);
5780
5781      Register map_reg = scratch;
5782      __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
5783      false_target()->Branch(eq);
5784
5785      // It can be an undetectable object.
5786      __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
5787      __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
5788      __ cmp(tos, Operand(1 << Map::kIsUndetectable));
5789      false_target()->Branch(eq);
5790
5791      __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
5792      __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
5793      false_target()->Branch(lt);
5794      __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
5795      cc_reg_ = le;
5796
5797    } else {
5798      // Uncommon case: typeof testing against a string literal that is
5799      // never returned from the typeof operator.
5800      false_target()->Jump();
5801    }
5802    ASSERT(!has_valid_frame() ||
5803           (has_cc() && frame_->height() == original_height));
5804    return;
5805  }
5806
5807  switch (op) {
5808    case Token::EQ:
5809      Comparison(eq, left, right, false);
5810      break;
5811
5812    case Token::LT:
5813      Comparison(lt, left, right);
5814      break;
5815
5816    case Token::GT:
5817      Comparison(gt, left, right);
5818      break;
5819
5820    case Token::LTE:
5821      Comparison(le, left, right);
5822      break;
5823
5824    case Token::GTE:
5825      Comparison(ge, left, right);
5826      break;
5827
5828    case Token::EQ_STRICT:
5829      Comparison(eq, left, right, true);
5830      break;
5831
5832    case Token::IN: {
5833      VirtualFrame::SpilledScope scope(frame_);
5834      Load(left);
5835      Load(right);
5836      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
5837      frame_->EmitPush(r0);
5838      break;
5839    }
5840
5841    case Token::INSTANCEOF: {
5842      VirtualFrame::SpilledScope scope(frame_);
5843      Load(left);
5844      Load(right);
5845      InstanceofStub stub;
5846      frame_->CallStub(&stub, 2);
5847      // At this point if instanceof succeeded then r0 == 0.
5848      __ tst(r0, Operand(r0));
5849      cc_reg_ = eq;
5850      break;
5851    }
5852
5853    default:
5854      UNREACHABLE();
5855  }
5856  ASSERT((has_cc() && frame_->height() == original_height) ||
5857         (!has_cc() && frame_->height() == original_height + 1));
5858}
5859
5860
5861class DeferredReferenceGetNamedValue: public DeferredCode {
5862 public:
5863  explicit DeferredReferenceGetNamedValue(Register receiver,
5864                                          Handle<String> name)
5865      : receiver_(receiver), name_(name) {
5866    set_comment("[ DeferredReferenceGetNamedValue");
5867  }
5868
5869  virtual void Generate();
5870
5871 private:
5872  Register receiver_;
5873  Handle<String> name_;
5874};
5875
5876
5877// Convention for this is that on entry the receiver is in a register that
5878// is not used by the stack.  On exit the answer is found in that same
5879// register and the stack has the same height.
5880void DeferredReferenceGetNamedValue::Generate() {
5881#ifdef DEBUG
5882  int expected_height = frame_state()->frame()->height();
5883#endif
5884  VirtualFrame copied_frame(*frame_state()->frame());
5885  copied_frame.SpillAll();
5886
5887  Register scratch1 = VirtualFrame::scratch0();
5888  Register scratch2 = VirtualFrame::scratch1();
5889  ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
5890  __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
5891  __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
5892
5893  // Ensure receiver in r0 and name in r2 to match load ic calling convention.
5894  __ Move(r0, receiver_);
5895  __ mov(r2, Operand(name_));
5896
5897  // The rest of the instructions in the deferred code must be together.
5898  { Assembler::BlockConstPoolScope block_const_pool(masm_);
5899    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
5900    __ Call(ic, RelocInfo::CODE_TARGET);
5901    // The call must be followed by a nop(1) instruction to indicate that the
5902    // in-object load has been inlined.
5903    __ nop(PROPERTY_ACCESS_INLINED);
5904
5905    // At this point the answer is in r0.  We move it to the expected register
5906    // if necessary.
5907    __ Move(receiver_, r0);
5908
5909    // Now go back to the frame that we entered with.  This will not overwrite
5910    // the receiver register since that register was not in use when we came
5911    // in.  The instructions emitted by this merge are skipped over by the
5912    // inline load patching mechanism when looking for the branch instruction
5913    // that tells it where the code to patch is.
5914    copied_frame.MergeTo(frame_state()->frame());
5915
5916    // Block the constant pool for one more instruction after leaving this
5917    // constant pool block scope to include the branch instruction ending the
5918    // deferred code.
5919    __ BlockConstPoolFor(1);
5920  }
5921  ASSERT_EQ(expected_height, frame_state()->frame()->height());
5922}
5923
5924
5925class DeferredReferenceGetKeyedValue: public DeferredCode {
5926 public:
5927  DeferredReferenceGetKeyedValue(Register key, Register receiver)
5928      : key_(key), receiver_(receiver) {
5929    set_comment("[ DeferredReferenceGetKeyedValue");
5930  }
5931
5932  virtual void Generate();
5933
5934 private:
5935  Register key_;
5936  Register receiver_;
5937};
5938
5939
5940void DeferredReferenceGetKeyedValue::Generate() {
5941  ASSERT((key_.is(r0) && receiver_.is(r1)) ||
5942         (key_.is(r1) && receiver_.is(r0)));
5943
5944  Register scratch1 = VirtualFrame::scratch0();
5945  Register scratch2 = VirtualFrame::scratch1();
5946  __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
5947  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
5948
5949  // Ensure key in r0 and receiver in r1 to match keyed load ic calling
5950  // convention.
5951  if (key_.is(r1)) {
5952    __ Swap(r0, r1, ip);
5953  }
5954
5955  // The rest of the instructions in the deferred code must be together.
5956  { Assembler::BlockConstPoolScope block_const_pool(masm_);
5957    // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
5958    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
5959    __ Call(ic, RelocInfo::CODE_TARGET);
5960    // The call must be followed by a nop instruction to indicate that the
5961    // keyed load has been inlined.
5962    __ nop(PROPERTY_ACCESS_INLINED);
5963
5964    // Block the constant pool for one more instruction after leaving this
5965    // constant pool block scope to include the branch instruction ending the
5966    // deferred code.
5967    __ BlockConstPoolFor(1);
5968  }
5969}
5970
5971
5972class DeferredReferenceSetKeyedValue: public DeferredCode {
5973 public:
5974  DeferredReferenceSetKeyedValue(Register value,
5975                                 Register key,
5976                                 Register receiver)
5977      : value_(value), key_(key), receiver_(receiver) {
5978    set_comment("[ DeferredReferenceSetKeyedValue");
5979  }
5980
5981  virtual void Generate();
5982
5983 private:
5984  Register value_;
5985  Register key_;
5986  Register receiver_;
5987};
5988
5989
5990void DeferredReferenceSetKeyedValue::Generate() {
5991  Register scratch1 = VirtualFrame::scratch0();
5992  Register scratch2 = VirtualFrame::scratch1();
5993  __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
5994  __ IncrementCounter(
5995      &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
5996
5997  // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
5998  // calling convention.
5999  if (value_.is(r1)) {
6000    __ Swap(r0, r1, ip);
6001  }
6002  ASSERT(receiver_.is(r2));
6003
6004  // The rest of the instructions in the deferred code must be together.
6005  { Assembler::BlockConstPoolScope block_const_pool(masm_);
6006    // Call keyed store IC. It has the arguments value, key and receiver in r0,
6007    // r1 and r2.
6008    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
6009    __ Call(ic, RelocInfo::CODE_TARGET);
6010    // The call must be followed by a nop instruction to indicate that the
6011    // keyed store has been inlined.
6012    __ nop(PROPERTY_ACCESS_INLINED);
6013
6014    // Block the constant pool for one more instruction after leaving this
6015    // constant pool block scope to include the branch instruction ending the
6016    // deferred code.
6017    __ BlockConstPoolFor(1);
6018  }
6019}
6020
6021
6022// Consumes the top of stack (the receiver) and pushes the result instead.
6023void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
6024  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
6025    Comment cmnt(masm(), "[ Load from named Property");
6026    // Set up the name register and call the load IC.
6027    frame_->CallLoadIC(name,
6028                       is_contextual
6029                           ? RelocInfo::CODE_TARGET_CONTEXT
6030                           : RelocInfo::CODE_TARGET);
6031    frame_->EmitPush(r0);  // Push answer.
6032  } else {
6033    // Inline the in-object property case.
6034    Comment cmnt(masm(), "[ Inlined named property load");
6035
6036    // Counter will be decremented in the deferred code. Placed here to avoid
6037    // having it in the instruction stream below where patching will occur.
6038    __ IncrementCounter(&Counters::named_load_inline, 1,
6039                        frame_->scratch0(), frame_->scratch1());
6040
6041    // The following instructions are the inlined load of an in-object property.
6042    // Parts of this code are patched, so the exact instructions generated
6043    // need to be fixed.  Therefore the constant pool is blocked while
6044    // generating this code.
6045
6046    // Load the receiver from the stack.
6047    Register receiver = frame_->PopToRegister();
6048
6049    DeferredReferenceGetNamedValue* deferred =
6050        new DeferredReferenceGetNamedValue(receiver, name);
6051
6052#ifdef DEBUG
6053    int kInlinedNamedLoadInstructions = 7;
6054    Label check_inlined_codesize;
6055    masm_->bind(&check_inlined_codesize);
6056#endif
6057
6058    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6059      // Check that the receiver is a heap object.
6060      __ tst(receiver, Operand(kSmiTagMask));
6061      deferred->Branch(eq);
6062
6063      Register scratch = VirtualFrame::scratch0();
6064      Register scratch2 = VirtualFrame::scratch1();
6065
6066      // Check the map. The null map used below is patched by the inline cache
6067      // code.  Therefore we can't use a LoadRoot call.
6068      __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
6069      __ mov(scratch2, Operand(Factory::null_value()));
6070      __ cmp(scratch, scratch2);
6071      deferred->Branch(ne);
6072
6073      // Initially use an invalid index. The index will be patched by the
6074      // inline cache code.
6075      __ ldr(receiver, MemOperand(receiver, 0));
6076
6077      // Make sure that the expected number of instructions are generated.
6078      ASSERT_EQ(kInlinedNamedLoadInstructions,
6079                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
6080    }
6081
6082    deferred->BindExit();
6083    // At this point the receiver register has the result, either from the
6084    // deferred code or from the inlined code.
6085    frame_->EmitPush(receiver);
6086  }
6087}
6088
6089
6090void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
6091#ifdef DEBUG
6092  int expected_height = frame_->height() - (is_contextual ? 1 : 2);
6093#endif
6094  frame_->CallStoreIC(name, is_contextual);
6095
6096  ASSERT_EQ(expected_height, frame_->height());
6097}
6098
6099
6100void CodeGenerator::EmitKeyedLoad() {
6101  if (loop_nesting() == 0) {
6102    Comment cmnt(masm_, "[ Load from keyed property");
6103    frame_->CallKeyedLoadIC();
6104  } else {
6105    // Inline the keyed load.
6106    Comment cmnt(masm_, "[ Inlined load from keyed property");
6107
6108    // Counter will be decremented in the deferred code. Placed here to avoid
6109    // having it in the instruction stream below where patching will occur.
6110    __ IncrementCounter(&Counters::keyed_load_inline, 1,
6111                        frame_->scratch0(), frame_->scratch1());
6112
6113    // Load the key and receiver from the stack.
6114    bool key_is_known_smi = frame_->KnownSmiAt(0);
6115    Register key = frame_->PopToRegister();
6116    Register receiver = frame_->PopToRegister(key);
6117    VirtualFrame::SpilledScope spilled(frame_);
6118
6119    // The deferred code expects key and receiver in registers.
6120    DeferredReferenceGetKeyedValue* deferred =
6121        new DeferredReferenceGetKeyedValue(key, receiver);
6122
6123    // Check that the receiver is a heap object.
6124    __ tst(receiver, Operand(kSmiTagMask));
6125    deferred->Branch(eq);
6126
    // The following instructions are the part of the inlined load keyed
    // property code which can be patched.  Therefore the exact number of
    // instructions generated needs to be fixed, so the constant pool is
    // blocked while generating this code.
6131    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6132      Register scratch1 = VirtualFrame::scratch0();
6133      Register scratch2 = VirtualFrame::scratch1();
6134      // Check the map. The null map used below is patched by the inline cache
6135      // code.
6136      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
6137
6138      // Check that the key is a smi.
6139      if (!key_is_known_smi) {
6140        __ tst(key, Operand(kSmiTagMask));
6141        deferred->Branch(ne);
6142      }
6143
6144#ifdef DEBUG
6145      Label check_inlined_codesize;
6146      masm_->bind(&check_inlined_codesize);
6147#endif
6148      __ mov(scratch2, Operand(Factory::null_value()));
6149      __ cmp(scratch1, scratch2);
6150      deferred->Branch(ne);
6151
6152      // Get the elements array from the receiver and check that it
6153      // is not a dictionary.
6154      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
6155      __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
6156      __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
6157      __ cmp(scratch2, ip);
6158      deferred->Branch(ne);
6159
6160      // Check that key is within bounds. Use unsigned comparison to handle
6161      // negative keys.
6162      __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
6163      __ cmp(scratch2, key);
6164      deferred->Branch(ls);  // Unsigned less equal.
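      // For illustration: smis keep the sign in bit 31, so the smi for -1
      // is 0xfffffffe.  Read unsigned, any negative key compares above any
      // valid length, so the ls branch catches negative and out-of-range
      // indices alike.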
6165
6166      // Load and check that the result is not the hole (key is a smi).
6167      __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
6168      __ add(scratch1,
6169             scratch1,
6170             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
6171      __ ldr(scratch1,
6172             MemOperand(scratch1, key, LSL,
6173                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
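      // Worked through: the key is a smi, i.e. the index shifted left by
      // kSmiTagSize (1), and kSmiShiftSize is 0 on 32-bit ARM, so the LSL
      // above scales by kPointerSizeLog2 - 1 == 1.  For index 3 the smi is
      // 6 and 6 << 1 == 12 == 3 * kPointerSize, the element's byte offset.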
6174      __ cmp(scratch1, scratch2);
6175      deferred->Branch(eq);
6176
6177      __ mov(r0, scratch1);
      // Make sure that the expected number of instructions is generated.
6179      ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatch,
6180                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
6181    }
6182
6183    deferred->BindExit();
6184  }
6185}
6186
6187
6188void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
6189  // Generate inlined version of the keyed store if the code is in a loop
6190  // and the key is likely to be a smi.
6191  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
6192    // Inline the keyed store.
6193    Comment cmnt(masm_, "[ Inlined store to keyed property");
6194
6195    Register scratch1 = VirtualFrame::scratch0();
6196    Register scratch2 = VirtualFrame::scratch1();
6197    Register scratch3 = r3;
6198
6199    // Counter will be decremented in the deferred code. Placed here to avoid
6200    // having it in the instruction stream below where patching will occur.
6201    __ IncrementCounter(&Counters::keyed_store_inline, 1,
6202                        scratch1, scratch2);
6203
6204    // Load the value, key and receiver from the stack.
6205    Register value = frame_->PopToRegister();
6206    Register key = frame_->PopToRegister(value);
6207    Register receiver = r2;
6208    frame_->EmitPop(receiver);
6209    VirtualFrame::SpilledScope spilled(frame_);
6210
6211    // The deferred code expects value, key and receiver in registers.
6212    DeferredReferenceSetKeyedValue* deferred =
6213        new DeferredReferenceSetKeyedValue(value, key, receiver);
6214
6215    // Check that the value is a smi. As this inlined code does not set the
6216    // write barrier it is only possible to store smi values.
6217    __ tst(value, Operand(kSmiTagMask));
6218    deferred->Branch(ne);
6219
6220    // Check that the key is a smi.
6221    __ tst(key, Operand(kSmiTagMask));
6222    deferred->Branch(ne);
6223
6224    // Check that the receiver is a heap object.
6225    __ tst(receiver, Operand(kSmiTagMask));
6226    deferred->Branch(eq);
6227
6228    // Check that the receiver is a JSArray.
6229    __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
6230    deferred->Branch(ne);
6231
6232    // Check that the key is within bounds. Both the key and the length of
6233    // the JSArray are smis. Use unsigned comparison to handle negative keys.
6234    __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
6235    __ cmp(scratch1, key);
6236    deferred->Branch(ls);  // Unsigned less equal.
6237
    // The following instructions are the part of the inlined store keyed
    // property code which can be patched.  Therefore the exact number of
    // instructions generated needs to be fixed, so the constant pool is
    // blocked while generating this code.
6242    { Assembler::BlockConstPoolScope block_const_pool(masm_);
6243      // Get the elements array from the receiver and check that it
6244      // is not a dictionary.
6245      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
6246      __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
6247      // Read the fixed array map from the constant pool (not from the root
6248      // array) so that the value can be patched.  When debugging, we patch this
6249      // comparison to always fail so that we will hit the IC call in the
6250      // deferred code which will allow the debugger to break for fast case
6251      // stores.
#ifdef DEBUG
      Label check_inlined_codesize;
      masm_->bind(&check_inlined_codesize);
#endif
6256      __ mov(scratch3, Operand(Factory::fixed_array_map()));
6257      __ cmp(scratch2, scratch3);
6258      deferred->Branch(ne);
6259
6260      // Store the value.
6261      __ add(scratch1, scratch1,
6262             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
6263      __ str(value,
6264             MemOperand(scratch1, key, LSL,
6265                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
6266
      // Make sure that the expected number of instructions is generated.
6268      ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
6269                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
6270    }
6271
6272    deferred->BindExit();
6273  } else {
6274    frame()->CallKeyedStoreIC();
6275  }
6276}
6277
6278
6279#ifdef DEBUG
6280bool CodeGenerator::HasValidEntryRegisters() { return true; }
6281#endif
6282
6283
6284#undef __
6285#define __ ACCESS_MASM(masm)
6286
6287
6288Handle<String> Reference::GetName() {
6289  ASSERT(type_ == NAMED);
6290  Property* property = expression_->AsProperty();
6291  if (property == NULL) {
6292    // Global variable reference treated as a named property reference.
6293    VariableProxy* proxy = expression_->AsVariableProxy();
6294    ASSERT(proxy->AsVariable() != NULL);
6295    ASSERT(proxy->AsVariable()->is_global());
6296    return proxy->name();
6297  } else {
6298    Literal* raw_name = property->key()->AsLiteral();
6299    ASSERT(raw_name != NULL);
6300    return Handle<String>(String::cast(*raw_name->handle()));
6301  }
6302}
6303
6304
6305void Reference::DupIfPersist() {
6306  if (persist_after_get_) {
6307    switch (type_) {
6308      case KEYED:
6309        cgen_->frame()->Dup2();
6310        break;
6311      case NAMED:
6312        cgen_->frame()->Dup();
6313        // Fall through.
6314      case UNLOADED:
6315      case ILLEGAL:
6316      case SLOT:
6317        // Do nothing.
6318        ;
6319    }
6320  } else {
6321    set_unloaded();
6322  }
6323}
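// Note on the Dup/Dup2 split above: a keyed reference keeps two values on
// the frame (receiver and key), so persisting it duplicates both, while a
// named reference keeps only the receiver.  Slot references hold nothing
// on the frame, so there is nothing to duplicate.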
6324
6325
6326void Reference::GetValue() {
6327  ASSERT(cgen_->HasValidEntryRegisters());
6328  ASSERT(!is_illegal());
6329  ASSERT(!cgen_->has_cc());
6330  MacroAssembler* masm = cgen_->masm();
6331  Property* property = expression_->AsProperty();
6332  if (property != NULL) {
6333    cgen_->CodeForSourcePosition(property->position());
6334  }
6335
6336  switch (type_) {
6337    case SLOT: {
6338      Comment cmnt(masm, "[ Load from Slot");
6339      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6340      ASSERT(slot != NULL);
6341      DupIfPersist();
6342      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
6343      break;
6344    }
6345
6346    case NAMED: {
6347      Variable* var = expression_->AsVariableProxy()->AsVariable();
6348      bool is_global = var != NULL;
6349      ASSERT(!is_global || var->is_global());
6350      Handle<String> name = GetName();
6351      DupIfPersist();
6352      cgen_->EmitNamedLoad(name, is_global);
6353      break;
6354    }
6355
6356    case KEYED: {
6357      ASSERT(property != NULL);
6358      DupIfPersist();
6359      cgen_->EmitKeyedLoad();
6360      cgen_->frame()->EmitPush(r0);
6361      break;
6362    }
6363
6364    default:
6365      UNREACHABLE();
6366  }
6367}
6368
6369
6370void Reference::SetValue(InitState init_state) {
6371  ASSERT(!is_illegal());
6372  ASSERT(!cgen_->has_cc());
6373  MacroAssembler* masm = cgen_->masm();
6374  VirtualFrame* frame = cgen_->frame();
6375  Property* property = expression_->AsProperty();
6376  if (property != NULL) {
6377    cgen_->CodeForSourcePosition(property->position());
6378  }
6379
6380  switch (type_) {
6381    case SLOT: {
6382      Comment cmnt(masm, "[ Store to Slot");
6383      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6384      cgen_->StoreToSlot(slot, init_state);
6385      set_unloaded();
6386      break;
6387    }
6388
6389    case NAMED: {
6390      Comment cmnt(masm, "[ Store to named Property");
6391      cgen_->EmitNamedStore(GetName(), false);
6392      frame->EmitPush(r0);
6393      set_unloaded();
6394      break;
6395    }
6396
6397    case KEYED: {
6398      Comment cmnt(masm, "[ Store to keyed Property");
6399      Property* property = expression_->AsProperty();
6400      ASSERT(property != NULL);
6401      cgen_->CodeForSourcePosition(property->position());
6402      cgen_->EmitKeyedStore(property->key()->type());
6403      frame->EmitPush(r0);
6404      set_unloaded();
6405      break;
6406    }
6407
6408    default:
6409      UNREACHABLE();
6410  }
6411}
6412
6413
6414void FastNewClosureStub::Generate(MacroAssembler* masm) {
6415  // Create a new closure from the given function info in new
6416  // space. Set the context to the current context in cp.
6417  Label gc;
6418
6419  // Pop the function info from the stack.
6420  __ pop(r3);
6421
6422  // Attempt to allocate new JSFunction in new space.
6423  __ AllocateInNewSpace(JSFunction::kSize,
6424                        r0,
6425                        r1,
6426                        r2,
6427                        &gc,
6428                        TAG_OBJECT);
6429
6430  // Compute the function map in the current global context and set that
6431  // as the map of the allocated object.
6432  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
6433  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
6434  __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
6435  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
6436
6437  // Initialize the rest of the function. We don't have to update the
6438  // write barrier because the allocated object is in new space.
6439  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
6440  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
6441  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
6442  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
6443  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
6444  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
6445  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
6446  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
6447
6448  // Return result. The argument function info has been popped already.
6449  __ Ret();
6450
6451  // Create a new closure through the slower runtime call.
6452  __ bind(&gc);
6453  __ Push(cp, r3);
6454  __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
6455}
6456
6457
6458void FastNewContextStub::Generate(MacroAssembler* masm) {
6459  // Try to allocate the context in new space.
6460  Label gc;
6461  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
6462
6463  // Attempt to allocate the context in new space.
6464  __ AllocateInNewSpace(FixedArray::SizeFor(length),
6465                        r0,
6466                        r1,
6467                        r2,
6468                        &gc,
6469                        TAG_OBJECT);
6470
6471  // Load the function from the stack.
6472  __ ldr(r3, MemOperand(sp, 0));
6473
  // Set up the object header.
6475  __ LoadRoot(r2, Heap::kContextMapRootIndex);
6476  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
6477  __ mov(r2, Operand(Smi::FromInt(length)));
6478  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
6479
  // Set up the fixed slots.
6481  __ mov(r1, Operand(Smi::FromInt(0)));
6482  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
6483  __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
6484  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
6485  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
6486
6487  // Copy the global object from the surrounding context.
6488  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
6489  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
6490
6491  // Initialize the rest of the slots to undefined.
6492  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
6493  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
6494    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
6495  }
6496
6497  // Remove the on-stack argument and return.
6498  __ mov(cp, r0);
6499  __ pop();
6500  __ Ret();
6501
6502  // Need to collect. Call into runtime system.
6503  __ bind(&gc);
6504  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
6505}
6506
6507
6508void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
6509  // Stack layout on entry:
6510  //
6511  // [sp]: constant elements.
6512  // [sp + kPointerSize]: literal index.
6513  // [sp + (2 * kPointerSize)]: literals array.
6514
6515  // All sizes here are multiples of kPointerSize.
6516  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
6517  int size = JSArray::kSize + elements_size;
6518
6519  // Load boilerplate object into r3 and check if we need to create a
6520  // boilerplate.
6521  Label slow_case;
6522  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
6523  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
6524  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
6525  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
6526  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
6527  __ cmp(r3, ip);
6528  __ b(eq, &slow_case);
6529
6530  // Allocate both the JS array and the elements array in one big
6531  // allocation. This avoids multiple limit checks.
6532  __ AllocateInNewSpace(size,
6533                        r0,
6534                        r1,
6535                        r2,
6536                        &slow_case,
6537                        TAG_OBJECT);
6538
6539  // Copy the JS array part.
6540  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
6541    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
6542      __ ldr(r1, FieldMemOperand(r3, i));
6543      __ str(r1, FieldMemOperand(r0, i));
6544    }
6545  }
6546
6547  if (length_ > 0) {
6548    // Get hold of the elements array of the boilerplate and setup the
6549    // elements pointer in the resulting object.
6550    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
6551    __ add(r2, r0, Operand(JSArray::kSize));
6552    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
6553
6554    // Copy the elements array.
6555    for (int i = 0; i < elements_size; i += kPointerSize) {
6556      __ ldr(r1, FieldMemOperand(r3, i));
6557      __ str(r1, FieldMemOperand(r2, i));
6558    }
6559  }
6560
6561  // Return and remove the on-stack parameters.
6562  __ add(sp, sp, Operand(3 * kPointerSize));
6563  __ Ret();
6564
6565  __ bind(&slow_case);
6566  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
6567}
6568
6569
// Takes a Smi and converts it to an IEEE 64 bit floating point value in two
// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second).  The register
// passed as scratch_reg is clobbered (it holds the leading-zero count), as
// is the source register.  No GC occurs during this stub so you don't have
// to set up the frame.
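//
// For illustration, the smi 3 converts as follows: 3 == 1.1b * 2^1, so the
// biased exponent is 1023 + 1 == 0x400 and the top fraction bit is set,
// giving an exponent word of 0x40080000 and a mantissa word of 0x00000000,
// exactly the bit pattern of the C double 3.0.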
6575class ConvertToDoubleStub : public CodeStub {
6576 public:
6577  ConvertToDoubleStub(Register result_reg_1,
6578                      Register result_reg_2,
6579                      Register source_reg,
6580                      Register scratch_reg)
6581      : result1_(result_reg_1),
6582        result2_(result_reg_2),
6583        source_(source_reg),
6584        zeros_(scratch_reg) { }
6585
6586 private:
6587  Register result1_;
6588  Register result2_;
6589  Register source_;
6590  Register zeros_;
6591
6592  // Minor key encoding in 16 bits.
6593  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
6594  class OpBits: public BitField<Token::Value, 2, 14> {};
6595
6596  Major MajorKey() { return ConvertToDouble; }
6597  int MinorKey() {
6598    // Encode the parameters in a unique 16 bit value.
6599    return  result1_.code() +
6600           (result2_.code() << 4) +
6601           (source_.code() << 8) +
6602           (zeros_.code() << 12);
6603  }
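  // For illustration: ARM register codes fit in 4 bits each, so the stub
  // instance ConvertToDoubleStub(r3, r2, r7, r6) used later in this file
  // gets minor key 3 + (2 << 4) + (7 << 8) + (6 << 12) == 0x6723, and
  // distinct register assignments never collide.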
6604
6605  void Generate(MacroAssembler* masm);
6606
6607  const char* GetName() { return "ConvertToDoubleStub"; }
6608
6609#ifdef DEBUG
6610  void Print() { PrintF("ConvertToDoubleStub\n"); }
6611#endif
6612};
6613
6614
6615void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
6616#ifndef BIG_ENDIAN_FLOATING_POINT
6617  Register exponent = result1_;
6618  Register mantissa = result2_;
6619#else
6620  Register exponent = result2_;
6621  Register mantissa = result1_;
6622#endif
6623  Label not_special;
6624  // Convert from Smi to integer.
6625  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
6626  // Move sign bit from source to destination.  This works because the sign bit
6627  // in the exponent word of the double has the same position and polarity as
6628  // the 2's complement sign bit in a Smi.
6629  ASSERT(HeapNumber::kSignMask == 0x80000000u);
6630  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
6631  // Subtract from 0 if source was negative.
6632  __ rsb(source_, source_, Operand(0), LeaveCC, ne);
6633
6634  // We have -1, 0 or 1, which we treat specially. Register source_ contains
6635  // absolute value: it is either equal to 1 (special case of -1 and 1),
6636  // greater than 1 (not a special case) or less than 1 (special case of 0).
6637  __ cmp(source_, Operand(1));
6638  __ b(gt, &not_special);
6639
6640  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
6641  static const uint32_t exponent_word_for_1 =
6642      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
6643  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
6644  // 1, 0 and -1 all have 0 for the second word.
6645  __ mov(mantissa, Operand(0));
6646  __ Ret();
6647
6648  __ bind(&not_special);
6649  // Count leading zeros.  Uses mantissa for a scratch register on pre-ARM5.
6650  // Gets the wrong answer for 0, but we already checked for that case above.
6651  __ CountLeadingZeros(source_, mantissa, zeros_);
6652  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.  Use a fudge factor to
  // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two
  // parts that fit in the ARM's constant field.
6656  int fudge = 0x400;
6657  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
6658  __ add(mantissa, mantissa, Operand(fudge));
6659  __ orr(exponent,
6660         exponent,
6661         Operand(mantissa, LSL, HeapNumber::kExponentShift));
6662  // Shift up the source chopping the top bit off.
6663  __ add(zeros_, zeros_, Operand(1));
6664  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
6665  __ mov(source_, Operand(source_, LSL, zeros_));
6666  // Compute lower part of fraction (last 12 bits).
6667  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
6668  // And the top (top 20 bits).
6669  __ orr(exponent,
6670         exponent,
6671         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
6672  __ Ret();
6673}
6674
6675
6676// See comment for class.
6677void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
6678  Label max_negative_int;
6679  // the_int_ has the answer which is a signed int32 but not a Smi.
6680  // We test for the special value that has a different exponent.  This test
6681  // has the neat side effect of setting the flags according to the sign.
6682  ASSERT(HeapNumber::kSignMask == 0x80000000u);
6683  __ cmp(the_int_, Operand(0x80000000u));
6684  __ b(eq, &max_negative_int);
6685  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
6686  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
6687  uint32_t non_smi_exponent =
6688      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
6689  __ mov(scratch_, Operand(non_smi_exponent));
6690  // Set the sign bit in scratch_ if the value was negative.
6691  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
6692  // Subtract from 0 if the value was negative.
6693  __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
  // We should be masking the implicit first digit of the mantissa away here,
6695  // but it just ends up combining harmlessly with the last digit of the
6696  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
6697  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
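  // For illustration: for the_int_ == 0x40000000 (2^30) the leading 1,
  // shifted right by shift_distance (10), lands on bit 20 of the word.
  // The biased exponent 0x41d already has that bit set, so the exponent
  // word stays 0x41d00000, exactly the double 1073741824.0.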
6698  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
6699  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
6700  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
6701  __ str(scratch_, FieldMemOperand(the_heap_number_,
6702                                   HeapNumber::kExponentOffset));
6703  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
6704  __ str(scratch_, FieldMemOperand(the_heap_number_,
6705                                   HeapNumber::kMantissaOffset));
6706  __ Ret();
6707
6708  __ bind(&max_negative_int);
6709  // The max negative int32 is stored as a positive number in the mantissa of
6710  // a double because it uses a sign bit instead of using two's complement.
6711  // The actual mantissa bits stored are all 0 because the implicit most
6712  // significant 1 bit is not stored.
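  // For illustration: kMinInt is -2^31 == -1.0 * 2^31, so after bumping the
  // exponent to 1023 + 31 == 0x41e and setting the sign bit, the stored
  // words are 0xc1e00000 (exponent) and 0x00000000 (mantissa).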
6713  non_smi_exponent += 1 << HeapNumber::kExponentShift;
6714  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
6715  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
6716  __ mov(ip, Operand(0));
6717  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
6718  __ Ret();
6719}
6720
6721
6722// Handle the case where the lhs and rhs are the same object.
6723// Equality is almost reflexive (everything but NaN), so this is a test
6724// for "identity and not NaN".
6725static void EmitIdenticalObjectComparison(MacroAssembler* masm,
6726                                          Label* slow,
6727                                          Condition cc,
6728                                          bool never_nan_nan) {
6729  Label not_identical;
6730  Label heap_number, return_equal;
6731  __ cmp(r0, r1);
6732  __ b(ne, &not_identical);
6733
6734  // The two objects are identical.  If we know that one of them isn't NaN then
6735  // we now know they test equal.
6736  if (cc != eq || !never_nan_nan) {
    // Test for NaN.  Sadly, we can't just compare to Factory::nan_value(),
    // so we do the second-best thing and test it ourselves.
    // The operands are identical and not both Smis, so neither of them is a
    // Smi.  If r0 is not a heap number, then return equal.
6741    if (cc == lt || cc == gt) {
6742      __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
6743      __ b(ge, slow);
6744    } else {
6745      __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
6746      __ b(eq, &heap_number);
6747      // Comparing JS objects with <=, >= is complicated.
6748      if (cc != eq) {
6749        __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
6750        __ b(ge, slow);
6751        // Normally here we fall through to return_equal, but undefined is
6752        // special: (undefined == undefined) == true, but
6753        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
6754        if (cc == le || cc == ge) {
6755          __ cmp(r4, Operand(ODDBALL_TYPE));
6756          __ b(ne, &return_equal);
6757          __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
6758          __ cmp(r0, r2);
6759          __ b(ne, &return_equal);
6760          if (cc == le) {
6761            // undefined <= undefined should fail.
6762            __ mov(r0, Operand(GREATER));
          } else {
6764            // undefined >= undefined should fail.
6765            __ mov(r0, Operand(LESS));
6766          }
6767          __ mov(pc, Operand(lr));       // Return.
6768        }
6769      }
6770    }
6771  }
6772
6773  __ bind(&return_equal);
6774  if (cc == lt) {
6775    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
6776  } else if (cc == gt) {
6777    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
6778  } else {
6779    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
6780  }
6781  __ mov(pc, Operand(lr));  // Return.
6782
6783  if (cc != eq || !never_nan_nan) {
6784    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless.  For the others, here is some code to check
6786    // for NaN.
6787    if (cc != lt && cc != gt) {
6788      __ bind(&heap_number);
6789      // It is a heap number, so return non-equal if it's NaN and equal if it's
6790      // not NaN.
6791
6792      // The representation of NaN values has all exponent bits (52..62) set,
6793      // and not all mantissa bits (0..51) clear.
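      // For illustration: +Infinity is 0x7ff00000 00000000 (all-ones
      // exponent, zero mantissa) and is reported equal to itself below,
      // while the canonical quiet NaN 0x7ff80000 00000000 has a non-zero
      // mantissa and is reported non-equal.  Sbfx of an all-ones exponent
      // field sign extends to -1, which the next comparison keys on.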
6794      // Read top bits of double representation (second word of value).
6795      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
6796      // Test that exponent bits are all set.
6797      __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
6798      // NaNs have all-one exponents so they sign extend to -1.
6799      __ cmp(r3, Operand(-1));
6800      __ b(ne, &return_equal);
6801
6802      // Shift out flag and all exponent bits, retaining only mantissa.
6803      __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
6804      // Or with all low-bits of mantissa.
6805      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
6806      __ orr(r0, r3, Operand(r2), SetCC);
6807      // For equal we already have the right value in r0:  Return zero (equal)
6808      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
6809      // not (it's a NaN).  For <= and >= we need to load r0 with the failing
6810      // value if it's a NaN.
6811      if (cc != eq) {
6812        // All-zero means Infinity means equal.
6813        __ mov(pc, Operand(lr), LeaveCC, eq);  // Return equal
6814        if (cc == le) {
6815          __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
6816        } else {
6817          __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
6818        }
6819      }
6820      __ mov(pc, Operand(lr));  // Return.
6821    }
6822    // No fall through here.
6823  }
6824
6825  __ bind(&not_identical);
6826}
6827
6828
6829// See comment at call site.
6830static void EmitSmiNonsmiComparison(MacroAssembler* masm,
6831                                    Label* lhs_not_nan,
6832                                    Label* slow,
6833                                    bool strict) {
6834  Label rhs_is_smi;
6835  __ tst(r0, Operand(kSmiTagMask));
6836  __ b(eq, &rhs_is_smi);
6837
6838  // Lhs is a Smi.  Check whether the rhs is a heap number.
6839  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
6840  if (strict) {
6841    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed.  Return non-equal (r0 is already not zero).
6843    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
6844  } else {
6845    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
6846    // the runtime.
6847    __ b(ne, slow);
6848  }
6849
6850  // Lhs (r1) is a smi, rhs (r0) is a number.
6851  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert lhs to a double in d7.
6853    CpuFeatures::Scope scope(VFP3);
6854    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
6855    __ vmov(s15, r7);
6856    __ vcvt_f64_s32(d7, s15);
6857    // Load the double from rhs, tagged HeapNumber r0, to d6.
6858    __ sub(r7, r0, Operand(kHeapObjectTag));
6859    __ vldr(d6, r7, HeapNumber::kValueOffset);
6860  } else {
6861    __ push(lr);
6862    // Convert lhs to a double in r2, r3.
6863    __ mov(r7, Operand(r1));
6864    ConvertToDoubleStub stub1(r3, r2, r7, r6);
6865    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
6866    // Load rhs to a double in r0, r1.
6867    __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
6868    __ pop(lr);
6869  }
6870
6871  // We now have both loaded as doubles but we can skip the lhs nan check
6872  // since it's a smi.
6873  __ jmp(lhs_not_nan);
6874
6875  __ bind(&rhs_is_smi);
6876  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
6877  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
6878  if (strict) {
6879    // If lhs is not a number and rhs is a smi then strict equality cannot
6880    // succeed.  Return non-equal.
6881    __ mov(r0, Operand(1), LeaveCC, ne);  // Non-zero indicates not equal.
6882    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
6883  } else {
6884    // Smi compared non-strictly with a non-smi non-heap-number.  Call
6885    // the runtime.
6886    __ b(ne, slow);
6887  }
6888
6889  // Rhs (r0) is a smi, lhs (r1) is a heap number.
6890  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert rhs to a double in d6.
6892    CpuFeatures::Scope scope(VFP3);
6893    // Load the double from lhs, tagged HeapNumber r1, to d7.
6894    __ sub(r7, r1, Operand(kHeapObjectTag));
6895    __ vldr(d7, r7, HeapNumber::kValueOffset);
6896    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
6897    __ vmov(s13, r7);
6898    __ vcvt_f64_s32(d6, s13);
6899  } else {
6900    __ push(lr);
6901    // Load lhs to a double in r2, r3.
6902    __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
6903    // Convert rhs to a double in r0, r1.
6904    __ mov(r7, Operand(r0));
6905    ConvertToDoubleStub stub2(r1, r0, r7, r6);
6906    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
6907    __ pop(lr);
6908  }
6909  // Fall through to both_loaded_as_doubles.
6910}
6911
6912
6913void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
6914  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
6915  Register rhs_exponent = exp_first ? r0 : r1;
6916  Register lhs_exponent = exp_first ? r2 : r3;
6917  Register rhs_mantissa = exp_first ? r1 : r0;
6918  Register lhs_mantissa = exp_first ? r3 : r2;
6919  Label one_is_nan, neither_is_nan;
6920
6921  __ Sbfx(r4,
6922          lhs_exponent,
6923          HeapNumber::kExponentShift,
6924          HeapNumber::kExponentBits);
6925  // NaNs have all-one exponents so they sign extend to -1.
6926  __ cmp(r4, Operand(-1));
6927  __ b(ne, lhs_not_nan);
6928  __ mov(r4,
6929         Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
6930         SetCC);
6931  __ b(ne, &one_is_nan);
6932  __ cmp(lhs_mantissa, Operand(0));
6933  __ b(ne, &one_is_nan);
6934
6935  __ bind(lhs_not_nan);
6936  __ Sbfx(r4,
6937          rhs_exponent,
6938          HeapNumber::kExponentShift,
6939          HeapNumber::kExponentBits);
6940  // NaNs have all-one exponents so they sign extend to -1.
6941  __ cmp(r4, Operand(-1));
6942  __ b(ne, &neither_is_nan);
6943  __ mov(r4,
6944         Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
6945         SetCC);
6946  __ b(ne, &one_is_nan);
6947  __ cmp(rhs_mantissa, Operand(0));
6948  __ b(eq, &neither_is_nan);
6949
6950  __ bind(&one_is_nan);
6951  // NaN comparisons always fail.
6952  // Load whatever we need in r0 to make the comparison fail.
6953  if (cc == lt || cc == le) {
6954    __ mov(r0, Operand(GREATER));
6955  } else {
6956    __ mov(r0, Operand(LESS));
6957  }
6958  __ mov(pc, Operand(lr));  // Return.
6959
6960  __ bind(&neither_is_nan);
6961}
6962
6963
6964// See comment at call site.
6965static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
6966  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
6967  Register rhs_exponent = exp_first ? r0 : r1;
6968  Register lhs_exponent = exp_first ? r2 : r3;
6969  Register rhs_mantissa = exp_first ? r1 : r0;
6970  Register lhs_mantissa = exp_first ? r3 : r2;
6971
6972  // r0, r1, r2, r3 have the two doubles.  Neither is a NaN.
6973  if (cc == eq) {
6974    // Doubles are not equal unless they have the same bit pattern.
6975    // Exception: 0 and -0.
6976    __ cmp(rhs_mantissa, Operand(lhs_mantissa));
6977    __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
6978    // Return non-zero if the numbers are unequal.
6979    __ mov(pc, Operand(lr), LeaveCC, ne);
6980
6981    __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
6982    // If exponents are equal then return 0.
6983    __ mov(pc, Operand(lr), LeaveCC, eq);
6984
6985    // Exponents are unequal.  The only way we can return that the numbers
6986    // are equal is if one is -0 and the other is 0.  We already dealt
6987    // with the case where both are -0 or both are 0.
    // We start by seeing if the mantissas (that are equal) or the bottom
    // 31 bits of the lhs exponent are non-zero.  If so we return not
    // equal.
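    // (For illustration: +0.0 is 0x00000000 00000000 and -0.0 is
    // 0x80000000 00000000, so shifting an exponent word left by one
    // discards exactly the sign bit.)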
6991    __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
6992    __ mov(r0, Operand(r4), LeaveCC, ne);
6993    __ mov(pc, Operand(lr), LeaveCC, ne);  // Return conditionally.
    // Now they are equal if and only if the rhs exponent is zero in its
    // low 31 bits.
6996    __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
6997    __ mov(pc, Operand(lr));
6998  } else {
6999    // Call a native function to do a comparison between two non-NaNs.
7000    // Call C routine that may not cause GC or other trouble.
7001    __ push(lr);
7002    __ PrepareCallCFunction(4, r5);  // Two doubles count as 4 arguments.
7003    __ CallCFunction(ExternalReference::compare_doubles(), 4);
7004    __ pop(pc);  // Return.
7005  }
7006}
7007
7008
7009// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
  // If either operand is a JSObject or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_JS_OBJECT_TYPE.
  __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero).
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ mov(pc, Operand(lr));  // Return.

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for symbol-symbol.
  // Ensure that no non-strings have the symbol bit set.
  ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
  ASSERT(kSymbolTag != 0);
  __ and_(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(ne, &return_not_equal);
}
7046
7047
7048// See comment at call site.
7049static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
7050                                       Label* both_loaded_as_doubles,
7051                                       Label* not_heap_numbers,
7052                                       Label* slow) {
7053  __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
7054  __ b(ne, not_heap_numbers);
7055  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
7056  __ cmp(r2, r3);
7057  __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.
7058
7059  // Both are heap numbers.  Load them up then jump to the code we have
7060  // for that.
7061  if (CpuFeatures::IsSupported(VFP3)) {
7062    CpuFeatures::Scope scope(VFP3);
7063    __ sub(r7, r0, Operand(kHeapObjectTag));
7064    __ vldr(d6, r7, HeapNumber::kValueOffset);
7065    __ sub(r7, r1, Operand(kHeapObjectTag));
7066    __ vldr(d7, r7, HeapNumber::kValueOffset);
7067  } else {
7068    __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
7069    __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
7070  }
7071  __ jmp(both_loaded_as_doubles);
7072}
7073
7074
7075// Fast negative check for symbol-to-symbol equality.
7076static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
7077  // r2 is object type of r0.
7078  // Ensure that no non-strings have the symbol bit set.
7079  ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
7080  ASSERT(kSymbolTag != 0);
7081  __ tst(r2, Operand(kIsSymbolMask));
7082  __ b(eq, slow);
7083  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
7084  __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
7085  __ tst(r3, Operand(kIsSymbolMask));
7086  __ b(eq, slow);
7087
7088  // Both are symbols.  We already checked they weren't the same pointer
7089  // so they are not equal.
7090  __ mov(r0, Operand(1));   // Non-zero indicates not equal.
7091  __ mov(pc, Operand(lr));  // Return.
7092}
7093
7094
7095void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
7096                                                         Register object,
7097                                                         Register result,
7098                                                         Register scratch1,
7099                                                         Register scratch2,
7100                                                         Register scratch3,
7101                                                         bool object_is_smi,
7102                                                         Label* not_found) {
7103  // Use of registers. Register result is used as a temporary.
7104  Register number_string_cache = result;
7105  Register mask = scratch3;
7106
7107  // Load the number string cache.
7108  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
7109
7110  // Make the hash mask from the length of the number string cache. It
7111  // contains two elements (number and string) for each cache entry.
7112  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
7113  // Divide length by two (length is a smi).
7114  __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
7115  __ sub(mask, mask, Operand(1));  // Make mask.
7116
7117  // Calculate the entry in the number string cache. The hash value in the
7118  // number string cache for smis is just the smi value, and the hash for
7119  // doubles is the xor of the upper and lower words. See
7120  // Heap::GetNumberStringCache.
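  // For illustration: with, say, 64 cache entries the mask is 63, and a
  // heap number whose value has word pair (hi, lo) probes entry
  // (hi ^ lo) & 63.  Each entry is two pointers wide, hence the
  // LSL (kPointerSizeLog2 + 1) scaling of the entry index below.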
7121  Label is_smi;
7122  Label load_result_from_cache;
7123  if (!object_is_smi) {
7124    __ BranchOnSmi(object, &is_smi);
7125    if (CpuFeatures::IsSupported(VFP3)) {
7126      CpuFeatures::Scope scope(VFP3);
7127      __ CheckMap(object,
7128                  scratch1,
7129                  Heap::kHeapNumberMapRootIndex,
7130                  not_found,
7131                  true);
7132
7133      ASSERT_EQ(8, kDoubleSize);
7134      __ add(scratch1,
7135             object,
7136             Operand(HeapNumber::kValueOffset - kHeapObjectTag));
7137      __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
7138      __ eor(scratch1, scratch1, Operand(scratch2));
7139      __ and_(scratch1, scratch1, Operand(mask));
7140
7141      // Calculate address of entry in string cache: each entry consists
7142      // of two pointer sized fields.
7143      __ add(scratch1,
7144             number_string_cache,
7145             Operand(scratch1, LSL, kPointerSizeLog2 + 1));
7146
7147      Register probe = mask;
7148      __ ldr(probe,
7149             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
7150      __ BranchOnSmi(probe, not_found);
7151      __ sub(scratch2, object, Operand(kHeapObjectTag));
7152      __ vldr(d0, scratch2, HeapNumber::kValueOffset);
7153      __ sub(probe, probe, Operand(kHeapObjectTag));
7154      __ vldr(d1, probe, HeapNumber::kValueOffset);
7155      __ vcmp(d0, d1);
7156      __ vmrs(pc);
7157      __ b(ne, not_found);  // The cache did not contain this value.
7158      __ b(&load_result_from_cache);
7159    } else {
7160      __ b(not_found);
7161    }
7162  }
7163
7164  __ bind(&is_smi);
7165  Register scratch = scratch1;
7166  __ and_(scratch, mask, Operand(object, ASR, 1));
7167  // Calculate address of entry in string cache: each entry consists
7168  // of two pointer sized fields.
7169  __ add(scratch,
7170         number_string_cache,
7171         Operand(scratch, LSL, kPointerSizeLog2 + 1));
7172
7173  // Check if the entry is the smi we are looking for.
7174  Register probe = mask;
7175  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
7176  __ cmp(object, probe);
7177  __ b(ne, not_found);
7178
7179  // Get the result from the cache.
7180  __ bind(&load_result_from_cache);
7181  __ ldr(result,
7182         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
7183  __ IncrementCounter(&Counters::number_to_string_native,
7184                      1,
7185                      scratch1,
7186                      scratch2);
7187}
7188
7189
7190void NumberToStringStub::Generate(MacroAssembler* masm) {
7191  Label runtime;
7192
7193  __ ldr(r1, MemOperand(sp, 0));
7194
7195  // Generate code to lookup number in the number string cache.
7196  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
7197  __ add(sp, sp, Operand(1 * kPointerSize));
7198  __ Ret();
7199
7200  __ bind(&runtime);
7201  // Handle number to string in the runtime system if not found in the cache.
7202  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
7203}
7204
7205
7206void RecordWriteStub::Generate(MacroAssembler* masm) {
7207  __ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_);
7208  __ Ret();
7209}
7210
7211
7212// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
7213// On exit r0 is 0, positive or negative to indicate the result of
7214// the comparison.
7215void CompareStub::Generate(MacroAssembler* masm) {
7216  Label slow;  // Call builtin.
7217  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
7218
7219  // NOTICE! This code is only reached after a smi-fast-case check, so
7220  // it is certain that at least one operand isn't a smi.
7221
7222  // Handle the case where the objects are identical.  Either returns the answer
7223  // or goes to slow.  Only falls through if the objects were not identical.
7224  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
7225
7226  // If either is a Smi (we know that not both are), then they can only
7227  // be strictly equal if the other is a HeapNumber.
7228  ASSERT_EQ(0, kSmiTag);
7229  ASSERT_EQ(0, Smi::FromInt(0));
7230  __ and_(r2, r0, Operand(r1));
7231  __ tst(r2, Operand(kSmiTagMask));
7232  __ b(ne, &not_smis);
7233  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
7234  // 1) Return the answer.
7235  // 2) Go to slow.
7236  // 3) Fall through to both_loaded_as_doubles.
7237  // 4) Jump to lhs_not_nan.
7238  // In cases 3 and 4 we have found out we were dealing with a number-number
7239  // comparison.  If VFP3 is supported the double values of the numbers have
7240  // been loaded into d7 and d6.  Otherwise, the double values have been loaded
7241  // into r0, r1, r2, and r3.
7242  EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
7243
7244  __ bind(&both_loaded_as_doubles);
7245  // The arguments have been converted to doubles and stored in d6 and d7, if
7246  // VFP3 is supported, or in r0, r1, r2, and r3.
7247  if (CpuFeatures::IsSupported(VFP3)) {
7248    __ bind(&lhs_not_nan);
7249    CpuFeatures::Scope scope(VFP3);
7250    Label no_nan;
7251    // ARMv7 VFP3 instructions to implement double precision comparison.
7252    __ vcmp(d7, d6);
7253    __ vmrs(pc);  // Move vector status bits to normal status bits.
7254    Label nan;
7255    __ b(vs, &nan);
7256    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
7257    __ mov(r0, Operand(LESS), LeaveCC, lt);
7258    __ mov(r0, Operand(GREATER), LeaveCC, gt);
7259    __ mov(pc, Operand(lr));
7260
7261    __ bind(&nan);
7262    // If one of the sides was a NaN then the v flag is set.  Load r0 with
7263    // whatever it takes to make the comparison fail, since comparisons with NaN
7264    // always fail.
7265    if (cc_ == lt || cc_ == le) {
7266      __ mov(r0, Operand(GREATER));
7267    } else {
7268      __ mov(r0, Operand(LESS));
7269    }
7270    __ mov(pc, Operand(lr));
7271  } else {
7272    // Checks for NaN in the doubles we have loaded.  Can return the answer or
7273    // fall through if neither is a NaN.  Also binds lhs_not_nan.
7274    EmitNanCheck(masm, &lhs_not_nan, cc_);
7275    // Compares two doubles in r0, r1, r2, r3 that are not NaNs.  Returns the
7276    // answer.  Never falls through.
7277    EmitTwoNonNanDoubleComparison(masm, cc_);
7278  }
7279
7280  __ bind(&not_smis);
7281  // At this point we know we are dealing with two different objects,
7282  // and neither of them is a Smi.  The objects are in r0 and r1.
7283  if (strict_) {
7284    // This returns non-equal for some object types, or falls through if it
7285    // was not lucky.
7286    EmitStrictTwoHeapObjectCompare(masm);
7287  }
7288
7289  Label check_for_symbols;
7290  Label flat_string_check;
7291  // Check for heap-number-heap-number comparison.  Can jump to slow case,
7292  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
7293  // that case.  If the inputs are not doubles then jumps to check_for_symbols.
7294  // In this case r2 will contain the type of r0.  Never falls through.
7295  EmitCheckForTwoHeapNumbers(masm,
7296                             &both_loaded_as_doubles,
7297                             &check_for_symbols,
7298                             &flat_string_check);
7299
7300  __ bind(&check_for_symbols);
  // In the strict case, EmitStrictTwoHeapObjectCompare above has already
  // taken care of symbols.
7303  if (cc_ == eq && !strict_) {
7304    // Either jumps to slow or returns the answer.  Assumes that r2 is the type
7305    // of r0 on entry.
7306    EmitCheckForSymbols(masm, &flat_string_check);
7307  }
7308
7309  // Check for both being sequential ASCII strings, and inline if that is the
7310  // case.
7311  __ bind(&flat_string_check);
7312
7313  __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
7314
7315  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
7316  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
7317                                                     r1,
7318                                                     r0,
7319                                                     r2,
7320                                                     r3,
7321                                                     r4,
7322                                                     r5);
7323  // Never falls through to here.
7324
7325  __ bind(&slow);
7326
7327  __ Push(r1, r0);
7328  // Figure out which native to call and setup the arguments.
7329  Builtins::JavaScript native;
7330  if (cc_ == eq) {
7331    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
7332  } else {
7333    native = Builtins::COMPARE;
7334    int ncr;  // NaN compare result
7335    if (cc_ == lt || cc_ == le) {
7336      ncr = GREATER;
7337    } else {
7338      ASSERT(cc_ == gt || cc_ == ge);  // remaining cases
7339      ncr = LESS;
7340    }
7341    __ mov(r0, Operand(Smi::FromInt(ncr)));
7342    __ push(r0);
7343  }
7344
7345  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
7346  // tagged as a small integer.
7347  __ InvokeBuiltin(native, JUMP_JS);
7348}
7349
7350
7351// We fall into this code if the operands were Smis, but the result was
// not (e.g. overflow).  We branch into this code (to the not_smi label) if
7353// the operands were not both Smi.  The operands are in r0 and r1.  In order
7354// to call the C-implemented binary fp operation routines we need to end up
7355// with the double precision floating point operands in r0 and r1 (for the
7356// value in r1) and r2 and r3 (for the value in r0).
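// Concretely, with the soft-float calling convention assumed here: after
// PrepareCallCFunction(4, ...) the lhs double sits in the argument
// registers r0/r1 and the rhs double in r2/r3, so the C routine behind
// ExternalReference::double_fp_operation(op_) receives two doubles passed
// as four register words.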
7357void GenericBinaryOpStub::HandleBinaryOpSlowCases(
7358    MacroAssembler* masm,
7359    Label* not_smi,
7360    Register lhs,
7361    Register rhs,
7362    const Builtins::JavaScript& builtin) {
7363  Label slow, slow_reverse, do_the_call;
7364  bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
7365
7366  ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
7367  Register heap_number_map = r6;
7368
7369  if (ShouldGenerateSmiCode()) {
7370    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7371
7372    // Smi-smi case (overflow).
7373    // Since both are Smis there is no heap number to overwrite, so allocate.
7374    // The new heap number is in r5.  r3 and r7 are scratch.
7375    __ AllocateHeapNumber(
7376        r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
7377
7378    // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
7379    // using registers d7 and d6 for the double values.
7380    if (use_fp_registers) {
7381      CpuFeatures::Scope scope(VFP3);
7382      __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
7383      __ vmov(s15, r7);
7384      __ vcvt_f64_s32(d7, s15);
7385      __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
7386      __ vmov(s13, r7);
7387      __ vcvt_f64_s32(d6, s13);
7388    } else {
      // Write Smi from rhs to r3 and r2 in double format.  r9 is scratch.
7390      __ mov(r7, Operand(rhs));
7391      ConvertToDoubleStub stub1(r3, r2, r7, r9);
7392      __ push(lr);
7393      __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
7394      // Write Smi from lhs to r1 and r0 in double format.  r9 is scratch.
7395      __ mov(r7, Operand(lhs));
7396      ConvertToDoubleStub stub2(r1, r0, r7, r9);
7397      __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
7398      __ pop(lr);
7399    }
7400    __ jmp(&do_the_call);  // Tail call.  No return.
7401  }
7402
7403  // We branch here if at least one of r0 and r1 is not a Smi.
7404  __ bind(not_smi);
7405  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7406
7407  // After this point we have the left hand side in r1 and the right hand side
7408  // in r0.
7409  if (lhs.is(r0)) {
7410    __ Swap(r0, r1, ip);
7411  }
7412
7413  if (ShouldGenerateFPCode()) {
7414    Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
7415
7416    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
7417      switch (op_) {
7418        case Token::ADD:
7419        case Token::SUB:
7420        case Token::MUL:
7421        case Token::DIV:
7422          GenerateTypeTransition(masm);
7423          break;
7424
7425        default:
7426          break;
7427      }
7428      // Restore heap number map register.
7429      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7430    }
7431
7432    if (mode_ == NO_OVERWRITE) {
7433      // In the case where there is no chance of an overwritable float we may as
7434      // well do the allocation immediately while r0 and r1 are untouched.
7435      __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
7436    }
7437
7438    // Move r0 to a double in r2-r3.
7439    __ tst(r0, Operand(kSmiTagMask));
7440    __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
7441    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
7442    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7443    __ cmp(r4, heap_number_map);
7444    __ b(ne, &slow);
7445    if (mode_ == OVERWRITE_RIGHT) {
7446      __ mov(r5, Operand(r0));  // Overwrite this heap number.
7447    }
7448    if (use_fp_registers) {
7449      CpuFeatures::Scope scope(VFP3);
7450      // Load the double from tagged HeapNumber r0 to d7.
7451      __ sub(r7, r0, Operand(kHeapObjectTag));
7452      __ vldr(d7, r7, HeapNumber::kValueOffset);
7453    } else {
7454      // Calling convention says that second double is in r2 and r3.
7455      __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
7456    }
7457    __ jmp(&finished_loading_r0);
7458    __ bind(&r0_is_smi);
7459    if (mode_ == OVERWRITE_RIGHT) {
7460      // We can't overwrite a Smi so get address of new heap number into r5.
      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
7462    }
7463
7464    if (use_fp_registers) {
7465      CpuFeatures::Scope scope(VFP3);
7466      // Convert smi in r0 to double in d7.
7467      __ mov(r7, Operand(r0, ASR, kSmiTagSize));
7468      __ vmov(s15, r7);
7469      __ vcvt_f64_s32(d7, s15);
7470    } else {
7471      // Write Smi from r0 to r3 and r2 in double format.
7472      __ mov(r7, Operand(r0));
7473      ConvertToDoubleStub stub3(r3, r2, r7, r4);
7474      __ push(lr);
7475      __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
7476      __ pop(lr);
7477    }
7478
7479    // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
7480    // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
7481    Label r1_is_not_smi;
7482    if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
7483      __ tst(r1, Operand(kSmiTagMask));
7484      __ b(ne, &r1_is_not_smi);
7485      GenerateTypeTransition(masm);
7486      // Restore heap number map register.
7487      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7488      __ jmp(&r1_is_smi);
7489    }
7490
7491    __ bind(&finished_loading_r0);
7492
7493    // Move r1 to a double in r0-r1.
7494    __ tst(r1, Operand(kSmiTagMask));
7495    __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
7496    __ bind(&r1_is_not_smi);
7497    __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
7498    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7499    __ cmp(r4, heap_number_map);
7500    __ b(ne, &slow);
7501    if (mode_ == OVERWRITE_LEFT) {
7502      __ mov(r5, Operand(r1));  // Overwrite this heap number.
7503    }
7504    if (use_fp_registers) {
7505      CpuFeatures::Scope scope(VFP3);
7506      // Load the double from tagged HeapNumber r1 to d6.
7507      __ sub(r7, r1, Operand(kHeapObjectTag));
7508      __ vldr(d6, r7, HeapNumber::kValueOffset);
7509    } else {
7510      // Calling convention says that first double is in r0 and r1.
7511      __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
7512    }
7513    __ jmp(&finished_loading_r1);
7514    __ bind(&r1_is_smi);
7515    if (mode_ == OVERWRITE_LEFT) {
7516      // We can't overwrite a Smi so get address of new heap number into r5.
7517      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
7518    }
7519
7520    if (use_fp_registers) {
7521      CpuFeatures::Scope scope(VFP3);
7522      // Convert smi in r1 to double in d6.
7523      __ mov(r7, Operand(r1, ASR, kSmiTagSize));
7524      __ vmov(s13, r7);
7525      __ vcvt_f64_s32(d6, s13);
7526    } else {
7527      // Write Smi from r1 to r1 and r0 in double format.
7528      __ mov(r7, Operand(r1));
7529      ConvertToDoubleStub stub4(r1, r0, r7, r9);
7530      __ push(lr);
7531      __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
7532      __ pop(lr);
7533    }
7534
7535    __ bind(&finished_loading_r1);
7536
7537    __ bind(&do_the_call);
7538    // If we are inlining the operation using VFP3 instructions for
7539    // add, subtract, multiply, or divide, the arguments are in d6 and d7.
7540    if (use_fp_registers) {
7541      CpuFeatures::Scope scope(VFP3);
7542      // ARMv7 VFP3 instructions to implement
7543      // double precision add, subtract, multiply, and divide.
7544
7545      if (Token::MUL == op_) {
7546        __ vmul(d5, d6, d7);
7547      } else if (Token::DIV == op_) {
7548        __ vdiv(d5, d6, d7);
7549      } else if (Token::ADD == op_) {
7550        __ vadd(d5, d6, d7);
7551      } else if (Token::SUB == op_) {
7552        __ vsub(d5, d6, d7);
7553      } else {
7554        UNREACHABLE();
7555      }
7556      __ sub(r0, r5, Operand(kHeapObjectTag));
7557      __ vstr(d5, r0, HeapNumber::kValueOffset);
7558      __ add(r0, r0, Operand(kHeapObjectTag));
7559      __ mov(pc, lr);
7560    } else {
7561      // If we did not inline the operation, then the arguments are in:
7562      // r0: Left value (least significant part of mantissa).
7563      // r1: Left value (sign, exponent, top of mantissa).
7564      // r2: Right value (least significant part of mantissa).
7565      // r3: Right value (sign, exponent, top of mantissa).
7566      // r5: Address of heap number for result.
7567
7568      __ push(lr);   // For later.
7569      __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments.
7570      // Call C routine that may not cause GC or other trouble. r5 is callee
7571      // save.
7572      __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
7573      // Store answer in the overwritable heap number.
7574#if !defined(USE_ARM_EABI)
7575      // Double returned in fp coprocessor registers 0 and 1, encoded as
7576      // register cr8.  Offsets must be divisible by 4 for the coprocessor, so
7577      // we need to subtract the tag from r5.
7578      __ sub(r4, r5, Operand(kHeapObjectTag));
7579      __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
7580#else
7581      // Double returned in registers 0 and 1.
7582      __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
7583#endif
7584      __ mov(r0, Operand(r5));
7585      // And we are done.
7586      __ pop(pc);
7587    }
7588  }
7589
7590  if (lhs.is(r0)) {
7591    __ b(&slow);
7592    __ bind(&slow_reverse);
7593    __ Swap(r0, r1, ip);
7594  }
7595
7596  heap_number_map = no_reg;  // Don't use this any more from here on.
7597
7598  // We jump to here if something goes wrong (one parameter is not a number of
7599  // any sort, or new-space allocation fails).
7600  __ bind(&slow);
7601
7602  // Push the arguments to the stack.
7603  __ Push(r1, r0);
7604
7605  if (Token::ADD == op_) {
7606    // Test for string arguments before calling runtime.
7607    // r1 : first argument
7608    // r0 : second argument
7609    // sp[0] : second argument
7610    // sp[4] : first argument
7611
7612    Label not_strings, not_string1, string1, string1_smi2;
7613    __ tst(r1, Operand(kSmiTagMask));
7614    __ b(eq, &not_string1);
7615    __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
7616    __ b(ge, &not_string1);
7617
7618    // First argument is a string, test the second.
7619    __ tst(r0, Operand(kSmiTagMask));
7620    __ b(eq, &string1_smi2);
7621    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
7622    __ b(ge, &string1);
7623
7624    // First and second argument are strings.
7625    StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
7626    __ TailCallStub(&string_add_stub);
7627
7628    __ bind(&string1_smi2);
7629    // First argument is a string, second is a smi. Try to lookup the number
7630    // string for the smi in the number string cache.
7631    NumberToStringStub::GenerateLookupNumberStringCache(
7632        masm, r0, r2, r4, r5, r6, true, &string1);
7633
7634    // Replace second argument on stack and tailcall string add stub to make
7635    // the result.
7636    __ str(r2, MemOperand(sp, 0));
7637    __ TailCallStub(&string_add_stub);
7638
7639    // Only first argument is a string.
7640    __ bind(&string1);
7641    __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
7642
7643    // First argument was not a string, test second.
7644    __ bind(&not_string1);
7645    __ tst(r0, Operand(kSmiTagMask));
7646    __ b(eq, &not_strings);
7647    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
7648    __ b(ge, &not_strings);
7649
7650    // Only second argument is a string.
7651    __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
7652
7653    __ bind(&not_strings);
7654  }
7655
7656  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
7657}
7658
7659
7660// Tries to get a signed int32 out of a double precision floating point heap
7661// number.  Rounds towards 0.  Fastest for doubles that are in the ranges
7662// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff.  This almost
7663// corresponds to the range of signed int32 values that are not Smis.  Jumps
7664// to the label 'slow' if the double isn't in the range -0x80000000.0 to
7665// 0x80000000.0 (excluding the endpoints).
7666static void GetInt32(MacroAssembler* masm,
7667                     Register source,
7668                     Register dest,
7669                     Register scratch,
7670                     Register scratch2,
7671                     Label* slow) {
7672  Label right_exponent, done;
7673  // Get exponent word.
7674  __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
7675  // Get exponent alone in scratch2.
7676  __ Ubfx(scratch2,
7677          scratch,
7678          HeapNumber::kExponentShift,
7679          HeapNumber::kExponentBits);
7680  // Load dest with zero.  We use this either for the final shift or
7681  // for the answer.
7682  __ mov(dest, Operand(0));
7683  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
7684  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).  This is
7685  // the exponent that we are fastest at and also the highest exponent we can
7686  // handle here.
7687  const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
7688  // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
7689  // split it up to avoid a constant pool entry.  You can't do that in general
7690  // for cmp because of the overflow flag, but we know the exponent is in the
7691  // range 0-2047 so there is no overflow.
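  // (For example, 0x41d - 0x400 == 0x1d, so the code below can use a sub of
  // 0x400 followed by a cmp against 0x1d; both constants fit in an ARM
  // immediate field.)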
7692  int fudge_factor = 0x400;
7693  __ sub(scratch2, scratch2, Operand(fudge_factor));
7694  __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
7695  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
7696  __ b(eq, &right_exponent);
7697  // If the exponent is higher than that then go to slow case.  This catches
7698  // numbers that don't fit in a signed int32, infinities and NaNs.
7699  __ b(gt, slow);
7700
7701  // We know the exponent is smaller than 30 (biased).  If it is less than
7702  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
7703  // it rounds to zero.
7704  const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
7705  __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
7706  // Dest already has a Smi zero.
7707  __ b(lt, &done);
7708  if (!CpuFeatures::IsSupported(VFP3)) {
7709    // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
7710    // get how much to shift down.
7711    __ rsb(dest, scratch2, Operand(30));
7712  }
7713  __ bind(&right_exponent);
7714  if (CpuFeatures::IsSupported(VFP3)) {
7715    CpuFeatures::Scope scope(VFP3);
7716    // ARMv7 VFP3 instructions implementing double precision to integer
7717    // conversion using round to zero.
7718    __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
7719    __ vmov(d7, scratch2, scratch);
7720    __ vcvt_s32_f64(s15, d7);
7721    __ vmov(dest, s15);
7722  } else {
7723    // Get the top bits of the mantissa.
7724    __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
7725    // Put back the implicit 1.
7726    __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
7727    // Shift up the mantissa bits to take up the space the exponent used to
7728    // take.  We just orred in the implicit bit, which took care of one bit,
7729    // and we want to leave the sign bit 0, so we subtract 2 bits from the
7730    // shift distance.
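    // (kNonMantissaBitsInTopWord is 12, i.e. the sign bit plus the 11
    // exponent bits, so the shift distance here is 10.)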
7731    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
7732    __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
7733    // Put sign in zero flag.
7734    __ tst(scratch, Operand(HeapNumber::kSignMask));
7735    // Get the second half of the double. For some exponents we don't
7736    // actually need this because the bits get shifted out again, but
7737    // it's probably slower to test than just to do it.
7738    __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
7739    // Shift down 22 bits to get the top 10 bits of the lower mantissa word.
7740    __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
7741    // Move down according to the exponent.
7742    __ mov(dest, Operand(scratch, LSR, dest));
7743    // Fix sign if sign bit was set.
7744    __ rsb(dest, dest, Operand(0), LeaveCC, ne);
7745  }
7746  __ bind(&done);
7747}
7748
7749// For bitwise ops where the inputs are not both Smis, we try to determine
7750// whether both inputs are either Smis or at least heap numbers that can be
7751// represented by a 32 bit signed value.  We truncate towards zero as required
7752// by the ES spec.  If this is the case we do the bitwise op and see if the
7753// result is a Smi.  If so, great, otherwise we try to find a heap number to
7754// write the answer into (either by allocating or by overwriting).
7755// On entry the operands are in lhs and rhs.  On exit the answer is in r0.
7756void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
7757                                                Register lhs,
7758                                                Register rhs) {
7759  Label slow, result_not_a_smi;
7760  Label rhs_is_smi, lhs_is_smi;
7761  Label done_checking_rhs, done_checking_lhs;
7762
7763  Register heap_number_map = r6;
7764  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
7765
7766  __ tst(lhs, Operand(kSmiTagMask));
7767  __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
7768  __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
7769  __ cmp(r4, heap_number_map);
7770  __ b(ne, &slow);
7771  GetInt32(masm, lhs, r3, r5, r4, &slow);
7772  __ jmp(&done_checking_lhs);
7773  __ bind(&lhs_is_smi);
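  // Untag the Smi: kSmiTagSize is 1, so an arithmetic shift right by one bit
  // recovers the signed integer value.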
7774  __ mov(r3, Operand(lhs, ASR, 1));
7775  __ bind(&done_checking_lhs);
7776
7777  __ tst(rhs, Operand(kSmiTagMask));
7778  __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
7779  __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
7780  __ cmp(r4, heap_number_map);
7781  __ b(ne, &slow);
7782  GetInt32(masm, rhs, r2, r5, r4, &slow);
7783  __ jmp(&done_checking_rhs);
7784  __ bind(&rhs_is_smi);
7785  __ mov(r2, Operand(rhs, ASR, 1));
7786  __ bind(&done_checking_rhs);
7787
7788  ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
7789
7790  // r0 and r1: Original operands (Smi or heap numbers).
7791  // r2 and r3: Signed int32 operands.
7792  switch (op_) {
7793    case Token::BIT_OR:  __ orr(r2, r2, Operand(r3)); break;
7794    case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
7795    case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
7796    case Token::SAR:
7797      // Use only the 5 least significant bits of the shift count.
7798      __ and_(r2, r2, Operand(0x1f));
7799      __ mov(r2, Operand(r3, ASR, r2));
7800      break;
7801    case Token::SHR:
7802      // Use only the 5 least significant bits of the shift count.
7803      __ and_(r2, r2, Operand(0x1f));
7804      __ mov(r2, Operand(r3, LSR, r2), SetCC);
7805      // SHR is special because it is required to produce a positive answer.
7806      // The code below for writing into heap numbers isn't capable of writing
7807      // the register as an unsigned int so we go to slow case if we hit this
7808      // case.
7809      __ b(mi, &slow);
7810      break;
7811    case Token::SHL:
7812      // Use only the 5 least significant bits of the shift count.
7813      __ and_(r2, r2, Operand(0x1f));
7814      __ mov(r2, Operand(r3, LSL, r2));
7815      break;
7816    default: UNREACHABLE();
7817  }
7818  // Check that the *signed* result fits in a Smi.
7819  __ add(r3, r2, Operand(0x40000000), SetCC);
7820  __ b(mi, &result_not_a_smi);
7821  __ mov(r0, Operand(r2, LSL, kSmiTagSize));
7822  __ Ret();
7823
7824  Label have_to_allocate, got_a_heap_number;
7825  __ bind(&result_not_a_smi);
7826  switch (mode_) {
7827    case OVERWRITE_RIGHT: {
7828      __ tst(rhs, Operand(kSmiTagMask));
7829      __ b(eq, &have_to_allocate);
7830      __ mov(r5, Operand(rhs));
7831      break;
7832    }
7833    case OVERWRITE_LEFT: {
7834      __ tst(lhs, Operand(kSmiTagMask));
7835      __ b(eq, &have_to_allocate);
7836      __ mov(r5, Operand(lhs));
7837      break;
7838    }
7839    case NO_OVERWRITE: {
7840      // Get a new heap number in r5.  r4 and r7 are scratch.
7841      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
7842    }
7843    default: break;
7844  }
7845  __ bind(&got_a_heap_number);
7846  // r2: Answer as signed int32.
7847  // r5: Heap number to write answer into.
7848
7849  // Nothing can go wrong now, so move the heap number to r0, which is the
7850  // result.
7851  __ mov(r0, Operand(r5));
7852
7853  // Tail call that writes the int32 in r2 to the heap number in r0, using
7854  // r3 as scratch.  r0 is preserved and returned.
7855  WriteInt32ToHeapNumberStub stub(r2, r0, r3);
7856  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
7857
7858  if (mode_ != NO_OVERWRITE) {
7859    __ bind(&have_to_allocate);
7860    // Get a new heap number in r5.  r4 and r7 are scratch.
7861    __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
7862    __ jmp(&got_a_heap_number);
7863  }
7864
7865  // If all else failed then we go to the runtime system.
7866  __ bind(&slow);
7867  __ Push(lhs, rhs);  // Restore stack.
7868  switch (op_) {
7869    case Token::BIT_OR:
7870      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
7871      break;
7872    case Token::BIT_AND:
7873      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
7874      break;
7875    case Token::BIT_XOR:
7876      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
7877      break;
7878    case Token::SAR:
7879      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
7880      break;
7881    case Token::SHR:
7882      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
7883      break;
7884    case Token::SHL:
7885      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
7886      break;
7887    default:
7888      UNREACHABLE();
7889  }
7890}
7891
7892
7893// Can we multiply by x with at most two shifts and an add?
7894// This answers yes to all integers from 2 to 10.
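// For example: 8 is a power of 2 (a single shift); 10 has two set bits, so a
// shift, an add, and a shift suffice; 7 is one less than a power of 2, so an
// rsb with a shifted operand works.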
7895static bool IsEasyToMultiplyBy(int x) {
7896  if (x < 2) return false;                          // Avoid special cases.
7897  if (x > (Smi::kMaxValue + 1) >> 2) return false;  // Almost always overflows.
7898  if (IsPowerOf2(x)) return true;                   // Simple shift.
7899  if (PopCountLessThanEqual2(x)) return true;       // Shift and add and shift.
7900  if (IsPowerOf2(x + 1)) return true;               // Patterns like 11111.
7901  return false;
7902}
7903
7904
7905// Can multiply by anything that IsEasyToMultiplyBy returns true for.
7906// Source and destination may be the same register.  This routine does not
7907// set the carry and overflow flags the way a mul instruction would.
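// For example, for known_int == 10 (binary 1010): first_bit == 1 and
// second_bit == 3, so add(dest, source, Operand(source, LSL, 2)) computes
// 5 * source, and the final mov(dest, Operand(dest, LSL, 1)) doubles that
// to 10 * source.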
7908static void MultiplyByKnownInt(MacroAssembler* masm,
7909                               Register source,
7910                               Register destination,
7911                               int known_int) {
7912  if (IsPowerOf2(known_int)) {
7913    __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
7914  } else if (PopCountLessThanEqual2(known_int)) {
7915    int first_bit = BitPosition(known_int);
7916    int second_bit = BitPosition(known_int ^ (1 << first_bit));
7917    __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
7918    if (first_bit != 0) {
7919      __ mov(destination, Operand(destination, LSL, first_bit));
7920    }
7921  } else {
7922    ASSERT(IsPowerOf2(known_int + 1));  // Patterns like 1111.
7923    int the_bit = BitPosition(known_int + 1);
7924    __ rsb(destination, source, Operand(source, LSL, the_bit));
7925  }
7926}
7927
7928
7929// This function (as opposed to MultiplyByKnownInt) takes the known int in
7930// a register for the cases where it doesn't know a good trick, and may deliver
7931// a result that needs shifting.
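// For example, for known_int == 6 the code computes result = 3 * source and
// sets *required_shift to 2; (3 * source) << 2 == 12 * source, which is the
// Smi-tagged (doubled) form of 6 * source.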
7932static void MultiplyByKnownInt2(
7933    MacroAssembler* masm,
7934    Register result,
7935    Register source,
7936    Register known_int_register,   // Smi tagged.
7937    int known_int,
7938    int* required_shift) {  // Including Smi tag shift
7939  switch (known_int) {
7940    case 3:
7941      __ add(result, source, Operand(source, LSL, 1));
7942      *required_shift = 1;
7943      break;
7944    case 5:
7945      __ add(result, source, Operand(source, LSL, 2));
7946      *required_shift = 1;
7947      break;
7948    case 6:
7949      __ add(result, source, Operand(source, LSL, 1));
7950      *required_shift = 2;
7951      break;
7952    case 7:
7953      __ rsb(result, source, Operand(source, LSL, 3));
7954      *required_shift = 1;
7955      break;
7956    case 9:
7957      __ add(result, source, Operand(source, LSL, 3));
7958      *required_shift = 1;
7959      break;
7960    case 10:
7961      __ add(result, source, Operand(source, LSL, 2));
7962      *required_shift = 2;
7963      break;
7964    default:
7965      ASSERT(!IsPowerOf2(known_int));  // That would be very inefficient.
7966      __ mul(result, source, known_int_register);
7967      *required_shift = 0;
7968  }
7969}
7970
7971
7972const char* GenericBinaryOpStub::GetName() {
7973  if (name_ != NULL) return name_;
7974  const int len = 100;
7975  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
7976  if (name_ == NULL) return "OOM";
7977  const char* op_name = Token::Name(op_);
7978  const char* overwrite_name;
7979  switch (mode_) {
7980    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
7981    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
7982    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
7983    default: overwrite_name = "UnknownOverwrite"; break;
7984  }
7985
7986  OS::SNPrintF(Vector<char>(name_, len),
7987               "GenericBinaryOpStub_%s_%s%s_%s",
7988               op_name,
7989               overwrite_name,
7990               specialized_on_rhs_ ? "_ConstantRhs" : "",
7991               BinaryOpIC::GetName(runtime_operands_type_));
7992  return name_;
7993}
7994
7995
7997void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
7998  // lhs_ : x
7999  // rhs_ : y
8000  // r0   : result
8001
8002  Register result = r0;
8003  Register lhs = lhs_;
8004  Register rhs = rhs_;
8005
8006  // This code can't cope with other register allocations yet.
8007  ASSERT(result.is(r0) &&
8008         ((lhs.is(r0) && rhs.is(r1)) ||
8009          (lhs.is(r1) && rhs.is(r0))));
8010
8011  Register smi_test_reg = VirtualFrame::scratch0();
8012  Register scratch = VirtualFrame::scratch1();
8013
8014  // All ops need to know whether we are dealing with two Smis.  Set up
8015  // smi_test_reg to tell us that.
8016  if (ShouldGenerateSmiCode()) {
8017    __ orr(smi_test_reg, lhs, Operand(rhs));
8018  }
8019
8020  switch (op_) {
8021    case Token::ADD: {
8022      Label not_smi;
8023      // Fast path.
8024      if (ShouldGenerateSmiCode()) {
8025        ASSERT(kSmiTag == 0);  // Adjust code below.
8026        __ tst(smi_test_reg, Operand(kSmiTagMask));
8027        __ b(ne, &not_smi);
8028        __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
8029        // Return if no overflow.
8030        __ Ret(vc);
8031        __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
8032      }
8033      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
8034      break;
8035    }
8036
8037    case Token::SUB: {
8038      Label not_smi;
8039      // Fast path.
8040      if (ShouldGenerateSmiCode()) {
8041        ASSERT(kSmiTag == 0);  // Adjust code below.
8042        __ tst(smi_test_reg, Operand(kSmiTagMask));
8043        __ b(ne, &not_smi);
8044        if (lhs.is(r1)) {
8045          __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
8046          // Return if no overflow.
8047          __ Ret(vc);
8048          __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
8049        } else {
8050          __ sub(r0, r0, Operand(r1), SetCC);  // Subtract y optimistically.
8051          // Return if no overflow.
8052          __ Ret(vc);
8053          __ add(r0, r0, Operand(r1));  // Revert optimistic subtract.
8054        }
8055      }
8056      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
8057      break;
8058    }
8059
8060    case Token::MUL: {
8061      Label not_smi, slow;
8062      if (ShouldGenerateSmiCode()) {
8063        ASSERT(kSmiTag == 0);  // Adjust code below.
8064        __ tst(smi_test_reg, Operand(kSmiTagMask));
8065        Register scratch2 = smi_test_reg;
8066        smi_test_reg = no_reg;
8067        __ b(ne, &not_smi);
8068        // Remove tag from one operand (but keep sign), so that result is Smi.
8069        __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
8070        // Do multiplication
8071        // scratch = lower 32 bits of ip * lhs.
8072        __ smull(scratch, scratch2, lhs, ip);
8073        // Go slow on overflows (overflow bit is not set).
8074        __ mov(ip, Operand(scratch, ASR, 31));
8075        // No overflow if higher 33 bits are identical.
8076        __ cmp(ip, Operand(scratch2));
8077        __ b(ne, &slow);
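        // (For example, 0x10000 * 0x10000 == 2^32: the low word of the
        // product is 0 but the high word is 1, so the comparison fails and
        // we take the slow case.)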
8078        // Go slow on zero result to handle -0.
8079        __ tst(scratch, Operand(scratch));
8080        __ mov(result, Operand(scratch), LeaveCC, ne);
8081        __ Ret(ne);
8082        // We need -0 if we were multiplying a negative number with 0 to get 0.
8083        // We know one of them was zero.
8084        __ add(scratch2, rhs, Operand(lhs), SetCC);
8085        __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
8086        __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
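        // (One operand is zero, so rhs + lhs equals the other operand; if
        // that sum is non-negative (pl) the correct product is +0, otherwise
        // we fall through to the slow case, which can produce -0.)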
8087        // Slow case.  We fall through here if we multiplied a negative number
8088        // with 0, because that would mean we should produce -0.
8089        __ bind(&slow);
8090      }
8091      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
8092      break;
8093    }
8094
8095    case Token::DIV:
8096    case Token::MOD: {
8097      Label not_smi;
8098      if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
8099        Label smi_is_unsuitable;
8100        __ BranchOnNotSmi(lhs, &not_smi);
8101        if (IsPowerOf2(constant_rhs_)) {
8102          if (op_ == Token::MOD) {
8103            __ and_(rhs,
8104                    lhs,
8105                    Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
8106                    SetCC);
8107            // We now have the answer, but if the input was negative we also
8108            // have the sign bit.  Our work is done if the result is
8109            // positive or zero:
8110            if (!rhs.is(r0)) {
8111              __ mov(r0, rhs, LeaveCC, pl);
8112            }
8113            __ Ret(pl);
8114            // A mod of a negative left hand side must return a negative number.
8115            // Unfortunately if the answer is 0 then we must return -0.  And we
8116            // already optimistically trashed rhs so we may need to restore it.
8117            __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
8118            // Next two instructions are conditional on the answer being -0.
8119            __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
8120            __ b(eq, &smi_is_unsuitable);
8121            // We need to subtract the divisor.  E.g. -3 % 4 == -3.
8122            __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
8123          } else {
8124            ASSERT(op_ == Token::DIV);
8125            __ tst(lhs,
8126                   Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
8127            __ b(ne, &smi_is_unsuitable);  // Go slow on negative or remainder.
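            // (For constant_rhs_ == 4 the mask is 0x80000007: the sign bit
            // catches negative inputs and the low bits catch any nonzero
            // remainder of the Smi-tagged value.)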
8128            int shift = 0;
8129            int d = constant_rhs_;
8130            while ((d & 1) == 0) {
8131              d >>= 1;
8132              shift++;
8133            }
8134            __ mov(r0, Operand(lhs, LSR, shift));
8135            __ bic(r0, r0, Operand(kSmiTagMask));
8136          }
8137        } else {
8138          // Not a power of 2.
8139          __ tst(lhs, Operand(0x80000000u));
8140          __ b(ne, &smi_is_unsuitable);
8141          // Find a fixed point reciprocal of the divisor so we can divide by
8142          // multiplying.
8143          double divisor = 1.0 / constant_rhs_;
8144          int shift = 32;
8145          double scale = 4294967296.0;  // 1 << 32.
8146          uint32_t mul;
8147          // Maximise the precision of the fixed point reciprocal.
8148          while (true) {
8149            mul = static_cast<uint32_t>(scale * divisor);
8150            if (mul >= 0x7fffffff) break;
8151            scale *= 2.0;
8152            shift++;
8153          }
8154          mul++;
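          // For example, for constant_rhs_ == 3 this loop settles on
          // shift == 33 and mul == 0xAAAAAAAB (roughly 2^33 / 3), so the
          // umull and shift below compute (lhs * mul) >> 34, the untagged
          // quotient of the Smi-tagged lhs divided by 3.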
8155          Register scratch2 = smi_test_reg;
8156          smi_test_reg = no_reg;
8157          __ mov(scratch2, Operand(mul));
8158          __ umull(scratch, scratch2, scratch2, lhs);
8159          __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
8160          // scratch2 is lhs / rhs.  scratch2 is not Smi tagged.
8161          // rhs is still the known rhs.  rhs is Smi tagged.
8162          // lhs is still the unknown lhs.  lhs is Smi tagged.
8163          int required_scratch_shift = 0;  // Including the Smi tag shift of 1.
8164          // scratch = scratch2 * rhs.
8165          MultiplyByKnownInt2(masm,
8166                              scratch,
8167                              scratch2,
8168                              rhs,
8169                              constant_rhs_,
8170                              &required_scratch_shift);
8171          // scratch << required_scratch_shift is now the Smi tagged rhs *
8172          // (lhs / rhs) where / indicates integer division.
8173          if (op_ == Token::DIV) {
8174            __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
8175            __ b(ne, &smi_is_unsuitable);  // There was a remainder.
8176            __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
8177          } else {
8178            ASSERT(op_ == Token::MOD);
8179            __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
8180          }
8181        }
8182        __ Ret();
8183        __ bind(&smi_is_unsuitable);
8184      } else if (op_ == Token::MOD &&
8185                 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
8186                 runtime_operands_type_ != BinaryOpIC::STRINGS) {
8187        // Do generate a bit of smi code for modulus even though the default
8188        // for modulus is not to do so; since the ARM processor has no
8189        // coprocessor support for modulus, checking for smis makes sense.
8190        Label slow;
8191        ASSERT(!ShouldGenerateSmiCode());
8192        ASSERT(kSmiTag == 0);  // Adjust code below.
8193        // Check for two positive smis.
8194        __ orr(smi_test_reg, lhs, Operand(rhs));
8195        __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
8196        __ b(ne, &slow);
8197        // Check that rhs is a power of two and not zero.
8198        __ sub(scratch, rhs, Operand(1), SetCC);
8199        __ b(mi, &slow);
8200        __ tst(rhs, scratch);
8201        __ b(ne, &slow);
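        // (A Smi rhs is a power of two iff rhs & (rhs - 1) == 0; e.g. Smi 4
        // is 0b1000 and 0b1000 & 0b0111 == 0.  The mi branch above rejects
        // rhs == 0.)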
8202        // Calculate power of two modulus.
8203        __ and_(result, lhs, Operand(scratch));
8204        __ Ret();
8205        __ bind(&slow);
8206      }
8207      HandleBinaryOpSlowCases(
8208          masm,
8209          &not_smi,
8210          lhs,
8211          rhs,
8212          op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
8213      break;
8214    }
8215
8216    case Token::BIT_OR:
8217    case Token::BIT_AND:
8218    case Token::BIT_XOR:
8219    case Token::SAR:
8220    case Token::SHR:
8221    case Token::SHL: {
8222      Label slow;
8223      ASSERT(kSmiTag == 0);  // Adjust code below.
8224      __ tst(smi_test_reg, Operand(kSmiTagMask));
8225      __ b(ne, &slow);
8226      Register scratch2 = smi_test_reg;
8227      smi_test_reg = no_reg;
8228      switch (op_) {
8229        case Token::BIT_OR:  __ orr(result, rhs, Operand(lhs)); break;
8230        case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
8231        case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
8232        case Token::SAR:
8233          // Remove tags from right operand.
8234          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
8235          __ mov(result, Operand(lhs, ASR, scratch2));
8236          // Smi tag result.
8237          __ bic(result, result, Operand(kSmiTagMask));
8238          break;
8239        case Token::SHR:
8240          // Remove tags from operands.  We can't do this on a 31 bit number
8241          // because then the 0s get shifted into bit 30 instead of bit 31.
8242          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
8243          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
8244          __ mov(scratch, Operand(scratch, LSR, scratch2));
8245          // Unsigned shift is not allowed to produce a negative number, so
8246          // check the sign bit and the sign bit after Smi tagging.
8247          __ tst(scratch, Operand(0xc0000000));
8248          __ b(ne, &slow);
8249          // Smi tag result.
8250          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
8251          break;
8252        case Token::SHL:
8253          // Remove tags from operands.
8254          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
8255          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
8256          __ mov(scratch, Operand(scratch, LSL, scratch2));
8257          // Check that the signed result fits in a Smi.
8258          __ add(scratch2, scratch, Operand(0x40000000), SetCC);
8259          __ b(mi, &slow);
8260          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
8261          break;
8262        default: UNREACHABLE();
8263      }
8264      __ Ret();
8265      __ bind(&slow);
8266      HandleNonSmiBitwiseOp(masm, lhs, rhs);
8267      break;
8268    }
8269
8270    default: UNREACHABLE();
8271  }
8272  // This code should be unreachable.
8273  __ stop("Unreachable");
8274
8275  // Generate an unreachable reference to the DEFAULT stub so that it can be
8276  // found at the end of this stub when clearing ICs at GC.
8277  // TODO(kaznacheev): Check performance impact and get rid of this.
8278  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
8279    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
8280    __ CallStub(&uninit);
8281  }
8282}
8283
8284
8285void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
8286  Label get_result;
8287
8288  __ Push(r1, r0);
8289
8290  // Internal frame is necessary to handle exceptions properly.
8291  __ EnterInternalFrame();
8292  // Call the stub proper to get the result in r0.
8293  __ Call(&get_result);
8294  __ LeaveInternalFrame();
8295
8296  __ push(r0);
8297
8298  __ mov(r0, Operand(Smi::FromInt(MinorKey())));
8299  __ push(r0);
8300  __ mov(r0, Operand(Smi::FromInt(op_)));
8301  __ push(r0);
8302  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
8303  __ push(r0);
8304
8305  __ TailCallExternalReference(
8306      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
8307      6,
8308      1);
8309
8310  // The entry point for the result calculation is assumed to be immediately
8311  // after this sequence.
8312  __ bind(&get_result);
8313}
8314
8315
8316Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
8317  GenericBinaryOpStub stub(key, type_info);
8318  return stub.GetCode();
8319}
8320
8321
8322void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
8323  // Argument is a number and is on the stack and in r0.
8324  Label runtime_call;
8325  Label input_not_smi;
8326  Label loaded;
8327
8328  if (CpuFeatures::IsSupported(VFP3)) {
8329    // Load argument and check if it is a smi.
8330    __ BranchOnNotSmi(r0, &input_not_smi);
8331
8332    CpuFeatures::Scope scope(VFP3);
8333    // Input is a smi. Convert to double and load the low and high words
8334    // of the double into r2, r3.
8335    __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
8336    __ b(&loaded);
8337
8338    __ bind(&input_not_smi);
8339    // Check if input is a HeapNumber.
8340    __ CheckMap(r0,
8341                r1,
8342                Heap::kHeapNumberMapRootIndex,
8343                &runtime_call,
8344                true);
8345    // Input is a HeapNumber. Load it to a double register and store the
8346    // low and high words into r2, r3.
8347    __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
8348
8349    __ bind(&loaded);
8350    // r2 = low 32 bits of double value
8351    // r3 = high 32 bits of double value
8352    // Compute hash (the shifts are arithmetic):
8353    //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
8354    __ eor(r1, r2, Operand(r3));
8355    __ eor(r1, r1, Operand(r1, ASR, 16));
8356    __ eor(r1, r1, Operand(r1, ASR, 8));
8357    ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
8358    __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
8359
8360    // r2 = low 32 bits of double value.
8361    // r3 = high 32 bits of double value.
8362    // r1 = TranscendentalCache::hash(double value).
8363    __ mov(r0,
8364           Operand(ExternalReference::transcendental_cache_array_address()));
8365    // r0 points to cache array.
8366    __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
8367    // r0 points to the cache for the type type_.
8368    // If NULL, the cache hasn't been initialized yet, so go through runtime.
8369    __ cmp(r0, Operand(0));
8370    __ b(eq, &runtime_call);
8371
8372#ifdef DEBUG
8373    // Check that the layout of cache elements matches expectations.
8374    { TranscendentalCache::Element test_elem[2];
8375      char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
8376      char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
8377      char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
8378      char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
8379      char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
8380      CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
8381      CHECK_EQ(0, elem_in0 - elem_start);
8382      CHECK_EQ(kIntSize, elem_in1 - elem_start);
8383      CHECK_EQ(2 * kIntSize, elem_out - elem_start);
8384    }
8385#endif
8386
8387    // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
8388    __ add(r1, r1, Operand(r1, LSL, 1));
8389    __ add(r0, r0, Operand(r1, LSL, 2));
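    // (r1 now holds 3 * index, and r0 was advanced by (3 * index) << 2, i.e.
    // 12 * index, matching the 12-byte element size checked above.)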
8390    // Check if cache matches: Double value is stored in uint32_t[2] array.
8391    __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit());
8392    __ cmp(r2, r4);
8393    __ b(ne, &runtime_call);
8394    __ cmp(r3, r5);
8395    __ b(ne, &runtime_call);
8396    // Cache hit. Load result, pop argument and return.
8397    __ mov(r0, Operand(r6));
8398    __ pop();
8399    __ Ret();
8400  }
8401
8402  __ bind(&runtime_call);
8403  __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
8404}
8405
8406
8407Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
8408  switch (type_) {
8409    // Add more cases when necessary.
8410    case TranscendentalCache::SIN: return Runtime::kMath_sin;
8411    case TranscendentalCache::COS: return Runtime::kMath_cos;
8412    default:
8413      UNIMPLEMENTED();
8414      return Runtime::kAbort;
8415  }
8416}
8417
8418
8419void StackCheckStub::Generate(MacroAssembler* masm) {
8420  // Do tail-call to runtime routine.  Runtime routines expect at least one
8421  // argument, so give it a Smi.
8422  __ mov(r0, Operand(Smi::FromInt(0)));
8423  __ push(r0);
8424  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
8425
8426  __ StubReturn(1);
8427}
8428
8429
8430void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
8431  Label slow, done;
8432
8433  Register heap_number_map = r6;
8434  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
8435
8436  if (op_ == Token::SUB) {
8437    // Check whether the value is a smi.
8438    Label try_float;
8439    __ tst(r0, Operand(kSmiTagMask));
8440    __ b(ne, &try_float);
8441
8442    // Go to the slow case if the value of the expression is zero, because
8443    // negating smi zero would need to produce -0, which is not a smi.
8444    __ cmp(r0, Operand(0));
8445    __ b(eq, &slow);
8446
8447    // The value of the expression is a smi that is not zero.  Try
8448    // optimistic subtraction '0 - value'.
8449    __ rsb(r1, r0, Operand(0), SetCC);
8450    __ b(vs, &slow);
8451
8452    __ mov(r0, Operand(r1));  // Set r0 to result.
8453    __ b(&done);
8454
8455    __ bind(&try_float);
8456    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
8457    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
8458    __ cmp(r1, heap_number_map);
8459    __ b(ne, &slow);
8460    // r0 is a heap number.  Get a new heap number in r1.
8461    if (overwrite_) {
8462      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
8463      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
8464      __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
8465    } else {
8466      __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
8467      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
8468      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
8469      __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
8470      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
8471      __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
8472      __ mov(r0, Operand(r1));
8473    }
8474  } else if (op_ == Token::BIT_NOT) {
8475    // Check if the operand is a heap number.
8476    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
8477    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
8478    __ cmp(r1, heap_number_map);
8479    __ b(ne, &slow);
8480
8481    // Convert the heap number in r0 to an untagged integer in r1.
8482    GetInt32(masm, r0, r1, r2, r3, &slow);
8483
8484    // Do the bitwise operation (move negated) and check if the result
8485    // fits in a smi.
8486    Label try_float;
8487    __ mvn(r1, Operand(r1));
8488    __ add(r2, r1, Operand(0x40000000), SetCC);
8489    __ b(mi, &try_float);
8490    __ mov(r0, Operand(r1, LSL, kSmiTagSize));
8491    __ b(&done);
8492
8493    __ bind(&try_float);
8494    if (!overwrite_) {
8495      // Allocate a fresh heap number, but don't overwrite r0 until
8496      // we're sure we can do it without going through the slow case
8497      // that needs the value in r0.
8498      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
8499      __ mov(r0, Operand(r2));
8500    }
8501
8502    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
8503    // have to set up a frame.
8504    WriteInt32ToHeapNumberStub stub(r1, r0, r2);
8505    __ push(lr);
8506    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
8507    __ pop(lr);
8508  } else {
8509    UNIMPLEMENTED();
8510  }
8511
8512  __ bind(&done);
8513  __ StubReturn(1);
8514
8515  // Handle the slow case by jumping to the JavaScript builtin.
8516  __ bind(&slow);
8517  __ push(r0);
8518  switch (op_) {
8519    case Token::SUB:
8520      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
8521      break;
8522    case Token::BIT_NOT:
8523      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
8524      break;
8525    default:
8526      UNREACHABLE();
8527  }
8528}
8529
8530
8531void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
8532  // r0 holds the exception.
8533
8534  // Adjust this code if not the case.
8535  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
8536
8537  // Drop the sp to the top of the handler.
8538  __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
8539  __ ldr(sp, MemOperand(r3));
8540
8541  // Restore the next handler and frame pointer, discard handler state.
8542  ASSERT(StackHandlerConstants::kNextOffset == 0);
8543  __ pop(r2);
8544  __ str(r2, MemOperand(r3));
8545  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
8546  __ ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state.
8547
8548  // Before returning we restore the context from the frame pointer if
8549  // not NULL.  The frame pointer is NULL in the exception handler of a
8550  // JS entry frame.
8551  __ cmp(fp, Operand(0));
8552  // Set cp to NULL if fp is NULL.
8553  __ mov(cp, Operand(0), LeaveCC, eq);
8554  // Restore cp otherwise.
8555  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
8556#ifdef DEBUG
8557  if (FLAG_debug_code) {
8558    __ mov(lr, Operand(pc));
8559  }
8560#endif
8561  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
8562  __ pop(pc);
8563}
8564
8565
8566void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
8567                                          UncatchableExceptionType type) {
8568  // Adjust this code if not the case.
8569  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
8570
8571  // Drop sp to the top stack handler.
8572  __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
8573  __ ldr(sp, MemOperand(r3));
8574
8575  // Unwind the handlers until the ENTRY handler is found.
8576  Label loop, done;
8577  __ bind(&loop);
8578  // Load the type of the current stack handler.
8579  const int kStateOffset = StackHandlerConstants::kStateOffset;
8580  __ ldr(r2, MemOperand(sp, kStateOffset));
8581  __ cmp(r2, Operand(StackHandler::ENTRY));
8582  __ b(eq, &done);
8583  // Fetch the next handler in the list.
8584  const int kNextOffset = StackHandlerConstants::kNextOffset;
8585  __ ldr(sp, MemOperand(sp, kNextOffset));
8586  __ jmp(&loop);
8587  __ bind(&done);
8588
8589  // Set the top handler address to the next handler past the ENTRY handler.
8590  ASSERT(StackHandlerConstants::kNextOffset == 0);
8591  __ pop(r2);
8592  __ str(r2, MemOperand(r3));
8593
8594  if (type == OUT_OF_MEMORY) {
8595    // Set external caught exception to false.
8596    ExternalReference external_caught(Top::k_external_caught_exception_address);
8597    __ mov(r0, Operand(false));
8598    __ mov(r2, Operand(external_caught));
8599    __ str(r0, MemOperand(r2));
8600
8601    // Set pending exception and r0 to out of memory exception.
8602    Failure* out_of_memory = Failure::OutOfMemoryException();
8603    __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
8604    __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
8605    __ str(r0, MemOperand(r2));
8606  }
8607
8608  // Stack layout at this point. See also StackHandlerConstants.
8609  // sp ->   state (ENTRY)
8610  //         fp
8611  //         lr
8612
8613  // Discard handler state (r2 is not used) and restore frame pointer.
8614  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
8615  __ ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
8616  // Before returning we restore the context from the frame pointer if
8617  // not NULL.  The frame pointer is NULL in the exception handler of a
8618  // JS entry frame.
8619  __ cmp(fp, Operand(0));
8620  // Set cp to NULL if fp is NULL.
8621  __ mov(cp, Operand(0), LeaveCC, eq);
8622  // Restore cp otherwise.
8623  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
8624#ifdef DEBUG
8625  if (FLAG_debug_code) {
8626    __ mov(lr, Operand(pc));
8627  }
8628#endif
8629  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
8630  __ pop(pc);
8631}
8632
8633
8634void CEntryStub::GenerateCore(MacroAssembler* masm,
8635                              Label* throw_normal_exception,
8636                              Label* throw_termination_exception,
8637                              Label* throw_out_of_memory_exception,
8638                              bool do_gc,
8639                              bool always_allocate,
8640                              int frame_alignment_skew) {
8641  // r0: result parameter for PerformGC, if any
8642  // r4: number of arguments including receiver  (C callee-saved)
8643  // r5: pointer to builtin function  (C callee-saved)
8644  // r6: pointer to the first argument (C callee-saved)
8645
8646  if (do_gc) {
8647    // Passing r0.
8648    __ PrepareCallCFunction(1, r1);
8649    __ CallCFunction(ExternalReference::perform_gc_function(), 1);
8650  }
8651
8652  ExternalReference scope_depth =
8653      ExternalReference::heap_always_allocate_scope_depth();
8654  if (always_allocate) {
8655    __ mov(r0, Operand(scope_depth));
8656    __ ldr(r1, MemOperand(r0));
8657    __ add(r1, r1, Operand(1));
8658    __ str(r1, MemOperand(r0));
8659  }
8660
8661  // Call C built-in.
8662  // r0 = argc, r1 = argv
8663  __ mov(r0, Operand(r4));
8664  __ mov(r1, Operand(r6));
8665
8666  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
8667  int frame_alignment_mask = frame_alignment - 1;
8668#if defined(V8_HOST_ARCH_ARM)
8669  if (FLAG_debug_code) {
8670    if (frame_alignment > kPointerSize) {
8671      Label alignment_as_expected;
8672      ASSERT(IsPowerOf2(frame_alignment));
8673      __ sub(r2, sp, Operand(frame_alignment_skew));
8674      __ tst(r2, Operand(frame_alignment_mask));
8675      __ b(eq, &alignment_as_expected);
8676      // Don't use Check here, as it will call Runtime_Abort re-entering here.
8677      __ stop("Unexpected alignment");
8678      __ bind(&alignment_as_expected);
8679    }
8680  }
8681#endif
8682
8683  // Just before the call (jump) below, lr is pushed, so the actual skew at
8684  // the call is the current skew plus one pointer size.
8685  int alignment_before_call =
8686      (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
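  // For example, with an 8-byte frame alignment and a zero skew, pushing lr
  // leaves sp 4 bytes off alignment, so alignment_before_call is 4 and one
  // padding word is pushed below.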
8687  if (alignment_before_call > 0) {
8688    // Push until the alignment before the call is met.
8689    __ mov(r2, Operand(0));
8690    for (int i = alignment_before_call;
8691        (i & frame_alignment_mask) != 0;
8692        i += kPointerSize) {
8693      __ push(r2);
8694    }
8695  }
8696
8697  // TODO(1242173): To let the GC traverse the return address of the exit
8698  // frames, we need to know where the return address is. Right now,
8699  // we push it on the stack to be able to find it again, but we never
8700  // restore from it in case of changes, which makes it impossible to
8701  // support moving the C entry code stub. This should be fixed, but currently
8702  // this is OK because the CEntryStub gets generated so early in the V8 boot
8703  // sequence that it is never moved.
8704  masm->add(lr, pc, Operand(4));  // Compute return address: (pc + 8) + 4
8705  masm->push(lr);
8706  masm->Jump(r5);
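  // (Reading pc on ARM yields the address of the current instruction plus 8,
  // so lr was set to the instruction just after the Jump above, which is
  // where the called builtin returns to.)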
8707
8708  // Restore sp back to before aligning the stack.
8709  if (alignment_before_call > 0) {
8710    __ add(sp, sp, Operand(alignment_before_call));
8711  }
8712
8713  if (always_allocate) {
8714    // It's okay to clobber r2 and r3 here.  Don't mess with r0 and r1,
8715    // though; they contain the result.
8716    __ mov(r2, Operand(scope_depth));
8717    __ ldr(r3, MemOperand(r2));
8718    __ sub(r3, r3, Operand(1));
8719    __ str(r3, MemOperand(r2));
8720  }
8721
8722  // Check for a failure result.
8723  Label failure_returned;
8724  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
8725  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
8726  __ add(r2, r0, Operand(1));
8727  __ tst(r2, Operand(kFailureTagMask));
8728  __ b(eq, &failure_returned);
8729
8730  // Exit C frame and return.
8731  // r0:r1: result
8732  // sp: stack pointer
8733  // fp: frame pointer
8734  __ LeaveExitFrame(mode_);
8735
8736  // Check if we should retry or throw an exception.
8737  Label retry;
8738  __ bind(&failure_returned);
8739  ASSERT(Failure::RETRY_AFTER_GC == 0);
8740  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
8741  __ b(eq, &retry);
8742
8743  // Special handling of out of memory exceptions.
8744  Failure* out_of_memory = Failure::OutOfMemoryException();
8745  __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
8746  __ b(eq, throw_out_of_memory_exception);
8747
8748  // Retrieve the pending exception and clear the variable.
8749  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
8750  __ ldr(r3, MemOperand(ip));
8751  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
8752  __ ldr(r0, MemOperand(ip));
8753  __ str(r3, MemOperand(ip));
8754
8755  // Special handling of termination exceptions, which are uncatchable
8756  // by JavaScript code.
8757  __ cmp(r0, Operand(Factory::termination_exception()));
8758  __ b(eq, throw_termination_exception);
8759
8760  // Handle normal exception.
8761  __ jmp(throw_normal_exception);
8762
8763  __ bind(&retry);  // pass last failure (r0) as parameter (r0) when retrying
8764}
8765
8766
8767void CEntryStub::Generate(MacroAssembler* masm) {
8768  // Called from JavaScript; parameters are on stack as if calling JS function
8769  // r0: number of arguments including receiver
8770  // r1: pointer to builtin function
8771  // fp: frame pointer  (restored after C call)
8772  // sp: stack pointer  (restored as callee's sp after C call)
8773  // cp: current context  (C callee-saved)
8774
8775  // Result returned in r0 or r0+r1 by default.
8776
8777  // NOTE: Invocations of builtins may return failure objects
8778  // instead of a proper result. The builtin entry handles
8779  // this by performing a garbage collection and retrying the
8780  // builtin once.
8781
8782  // Enter the exit frame that transitions from JavaScript to C++.
8783  __ EnterExitFrame(mode_);
8784
8785  // r4: number of arguments (C callee-saved)
8786  // r5: pointer to builtin function (C callee-saved)
8787  // r6: pointer to first argument (C callee-saved)
8788
8789  Label throw_normal_exception;
8790  Label throw_termination_exception;
8791  Label throw_out_of_memory_exception;
8792
8793  // Call into the runtime system.
8794  GenerateCore(masm,
8795               &throw_normal_exception,
8796               &throw_termination_exception,
8797               &throw_out_of_memory_exception,
8798               false,
8799               false,
8800               -kPointerSize);
8801
8802  // Do space-specific GC and retry runtime call.
8803  GenerateCore(masm,
8804               &throw_normal_exception,
8805               &throw_termination_exception,
8806               &throw_out_of_memory_exception,
8807               true,
8808               false,
8809               0);
8810
8811  // Do full GC and retry runtime call one final time.
8812  Failure* failure = Failure::InternalError();
8813  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
8814  GenerateCore(masm,
8815               &throw_normal_exception,
8816               &throw_termination_exception,
8817               &throw_out_of_memory_exception,
8818               true,
8819               true,
8820               kPointerSize);
8821
8822  __ bind(&throw_out_of_memory_exception);
8823  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
8824
8825  __ bind(&throw_termination_exception);
8826  GenerateThrowUncatchable(masm, TERMINATION);
8827
8828  __ bind(&throw_normal_exception);
8829  GenerateThrowTOS(masm);
8830}
8831
8832
8833void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
8834  // r0: code entry
8835  // r1: function
8836  // r2: receiver
8837  // r3: argc
8838  // [sp+0]: argv
8839
8840  Label invoke, exit;
8841
8842  // Called from C, so do not pop argc and args on exit (preserve sp)
8843  // No need to save register-passed args
8844  // Save callee-saved registers (incl. cp and fp), sp, and lr
8845  __ stm(db_w, sp, kCalleeSaved | lr.bit());
8846
8847  // Get address of argv, see stm above.
8848  // r0: code entry
8849  // r1: function
8850  // r2: receiver
8851  // r3: argc
8852  __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize));  // argv
8853
8854  // Push a frame with special values setup to mark it as an entry frame.
8855  // r0: code entry
8856  // r1: function
8857  // r2: receiver
8858  // r3: argc
8859  // r4: argv
8860  __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used.
8861  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
8862  __ mov(r7, Operand(Smi::FromInt(marker)));
8863  __ mov(r6, Operand(Smi::FromInt(marker)));
8864  __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
8865  __ ldr(r5, MemOperand(r5));
8866  __ Push(r8, r7, r6, r5);
8867
8868  // Set up the frame pointer for the frame to be pushed.
8869  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
8870
8871  // Call a faked try-block that does the invoke.
8872  __ bl(&invoke);
8873
8874  // Caught exception: Store result (exception) in the pending
8875  // exception field in the JSEnv and return a failure sentinel.
8876  // Coming in here the fp will be invalid because the PushTryHandler below
8877  // sets it to 0 to signal the existence of the JSEntry frame.
8878  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
8879  __ str(r0, MemOperand(ip));
8880  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
8881  __ b(&exit);
8882
8883  // Invoke: Link this frame into the handler chain.
8884  __ bind(&invoke);
8885  // Must preserve r0-r4, r5-r7 are available.
8886  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
8887  // If an exception not caught by another handler occurs, this handler
8888  // returns control to the code after the bl(&invoke) above, which
8889  // restores all kCalleeSaved registers (including cp and fp) to their
8890  // saved values before returning a failure to C.
8891
8892  // Clear any pending exceptions.
8893  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
8894  __ ldr(r5, MemOperand(ip));
8895  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
8896  __ str(r5, MemOperand(ip));
8897
8898  // Invoke the function by calling through JS entry trampoline builtin.
8899  // Notice that we cannot store a reference to the trampoline code directly in
8900  // this stub, because runtime stubs are not traversed when doing GC.
8901
8902  // Registers expected by Builtins::JSEntryTrampoline:
8903  // r0: code entry
8904  // r1: function
8905  // r2: receiver
8906  // r3: argc
8907  // r4: argv
8908  if (is_construct) {
8909    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
8910    __ mov(ip, Operand(construct_entry));
8911  } else {
8912    ExternalReference entry(Builtins::JSEntryTrampoline);
8913    __ mov(ip, Operand(entry));
8914  }
8915  __ ldr(ip, MemOperand(ip));  // deref address
8916
8917  // Branch and link to JSEntryTrampoline.  We don't use the double underscore
8918  // macro for the add instruction because we don't want the coverage tool
8919  // inserting instructions here after we read the pc.
8920  __ mov(lr, Operand(pc));
8921  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
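  // Note: in ARM state, reading pc yields the address of the current
  // instruction plus 8, so the mov above leaves lr pointing at the
  // instruction just after the add, i.e. this pair emulates a call
  // through ip.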
8922
8923  // Unlink this frame from the handler chain. When reading the
8924  // address of the next handler, there is no need to use the address
8925  // displacement since the current stack pointer (sp) points directly
8926  // to the stack handler.
8927  __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
8928  __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
8929  __ str(r3, MemOperand(ip));
8930  // No need to restore registers
8931  __ add(sp, sp, Operand(StackHandlerConstants::kSize));
8932
8933
8934  __ bind(&exit);  // r0 holds result
8935  // Restore the top frame descriptors from the stack.
8936  __ pop(r3);
8937  __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
8938  __ str(r3, MemOperand(ip));
8939
8940  // Reset the stack to the callee saved registers.
8941  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
8942
8943  // Restore callee-saved registers and return.
8944#ifdef DEBUG
8945  if (FLAG_debug_code) {
8946    __ mov(lr, Operand(pc));
8947  }
8948#endif
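  // The ldm below restores the callee-saved registers and loads the lr value
  // saved by the stm at function entry directly into pc, performing the
  // return to the C caller in a single instruction.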
8949  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
8950}
8951
8952
8953// This stub performs an instanceof, calling the builtin function if
8954// necessary.  Uses r1 for the object, r0 for the function that it may
8955// be an instance of (these are fetched from the stack).
8956void InstanceofStub::Generate(MacroAssembler* masm) {
8957  // Get the object - slow case for smis (we may need to throw an exception
8958  // depending on the rhs).
8959  Label slow, loop, is_instance, is_not_instance;
8960  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
8961  __ BranchOnSmi(r0, &slow);
8962
8963  // Check that the left hand is a JS object and put map in r3.
8964  __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
8965  __ b(lt, &slow);
8966  __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
8967  __ b(gt, &slow);
8968
8969  // Get the prototype of the function (r4 is result, r2 is scratch).
8970  __ ldr(r1, MemOperand(sp, 0));
8971  // r1 is function, r3 is map.
8972
8973  // Look up the function and the map in the instanceof cache.
8974  Label miss;
8975  __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
8976  __ cmp(r1, ip);
8977  __ b(ne, &miss);
8978  __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
8979  __ cmp(r3, ip);
8980  __ b(ne, &miss);
8981  __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
8982  __ pop();
8983  __ pop();
8984  __ mov(pc, Operand(lr));
8985
8986  __ bind(&miss);
8987  __ TryGetFunctionPrototype(r1, r4, r2, &slow);
8988
8989  // Check that the function prototype is a JS object.
8990  __ BranchOnSmi(r4, &slow);
8991  __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
8992  __ b(lt, &slow);
8993  __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
8994  __ b(gt, &slow);
8995
8996  __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex);
8997  __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex);
8998
8999  // Register mapping: r3 is object map and r4 is function prototype.
9000  // Get prototype of object into r2.
9001  __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
9002
9003  // Loop through the prototype chain looking for the function prototype.
9004  __ bind(&loop);
9005  __ cmp(r2, Operand(r4));
9006  __ b(eq, &is_instance);
9007  __ LoadRoot(ip, Heap::kNullValueRootIndex);
9008  __ cmp(r2, ip);
9009  __ b(eq, &is_not_instance);
9010  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
9011  __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
9012  __ jmp(&loop);
9013
9014  __ bind(&is_instance);
9015  __ mov(r0, Operand(Smi::FromInt(0)));
9016  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
9017  __ pop();
9018  __ pop();
9019  __ mov(pc, Operand(lr));  // Return.
9020
9021  __ bind(&is_not_instance);
9022  __ mov(r0, Operand(Smi::FromInt(1)));
9023  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
9024  __ pop();
9025  __ pop();
9026  __ mov(pc, Operand(lr));  // Return.
9027
9028  // Slow-case.  Tail call builtin.
9029  __ bind(&slow);
9030  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
9031}
9032
9033
9034void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
9035  // The displacement is the offset of the last parameter (if any)
9036  // relative to the frame pointer.
9037  static const int kDisplacement =
9038      StandardFrameConstants::kCallerSPOffset - kPointerSize;
9039
9040  // Check that the key is a smi.
9041  Label slow;
9042  __ BranchOnNotSmi(r1, &slow);
9043
9044  // Check if the calling frame is an arguments adaptor frame.
9045  Label adaptor;
9046  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
9047  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
9048  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
9049  __ b(eq, &adaptor);
9050
9051  // Check index against formal parameters count limit passed in
9052  // through register r0. Use unsigned comparison to get negative
9053  // check for free.
9054  __ cmp(r1, r0);
9055  __ b(cs, &slow);
9056
9057  // Read the argument from the stack and return it.
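  // Both r0 and r1 are smis (2 * their untagged values), so r3 = r0 - r1
  // below is 2 * (argc - index). Shifting left by
  // (kPointerSizeLog2 - kSmiTagSize), i.e. by 1, scales this to the byte
  // offset (argc - index) * kPointerSize, from which kDisplacement selects
  // the wanted argument slot relative to fp.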
9058  __ sub(r3, r0, r1);
9059  __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
9060  __ ldr(r0, MemOperand(r3, kDisplacement));
9061  __ Jump(lr);
9062
9063  // Arguments adaptor case: Check index against actual arguments
9064  // limit found in the arguments adaptor frame. Use unsigned
9065  // comparison to get negative check for free.
9066  __ bind(&adaptor);
9067  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
9068  __ cmp(r1, r0);
9069  __ b(cs, &slow);
9070
9071  // Read the argument from the adaptor frame and return it.
9072  __ sub(r3, r0, r1);
9073  __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
9074  __ ldr(r0, MemOperand(r3, kDisplacement));
9075  __ Jump(lr);
9076
9077  // Slow-case: Handle non-smi or out-of-bounds access to arguments
9078  // by calling the runtime system.
9079  __ bind(&slow);
9080  __ push(r1);
9081  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
9082}
9083
9084
9085void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
9086  // sp[0] : number of parameters
9087  // sp[4] : receiver displacement
9088  // sp[8] : function
9089
9090  // Check if the calling frame is an arguments adaptor frame.
9091  Label adaptor_frame, try_allocate, runtime;
9092  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
9093  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
9094  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
9095  __ b(eq, &adaptor_frame);
9096
9097  // Get the length from the frame.
9098  __ ldr(r1, MemOperand(sp, 0));
9099  __ b(&try_allocate);
9100
9101  // Patch the arguments.length and the parameters pointer.
9102  __ bind(&adaptor_frame);
9103  __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
9104  __ str(r1, MemOperand(sp, 0));
9105  __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
9106  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
9107  __ str(r3, MemOperand(sp, 1 * kPointerSize));
9108
9109  // Try the new space allocation. Start out with computing the size
9110  // of the arguments object and the elements array in words.
9111  Label add_arguments_object;
9112  __ bind(&try_allocate);
9113  __ cmp(r1, Operand(0));
9114  __ b(eq, &add_arguments_object);
9115  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
9116  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
9117  __ bind(&add_arguments_object);
9118  __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
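  // r1 now holds the total allocation size in words: the arguments object
  // itself, plus, for a non-zero length, the elements array header and one
  // word per argument (the length was untagged by the LSR above).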
9119
9120  // Do the allocation of both objects in one go.
9121  __ AllocateInNewSpace(
9122      r1,
9123      r0,
9124      r2,
9125      r3,
9126      &runtime,
9127      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
9128
9129  // Get the arguments boilerplate from the current (global) context.
9130  int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
9131  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
9132  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
9133  __ ldr(r4, MemOperand(r4, offset));
9134
9135  // Copy the JS object part.
9136  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
9137    __ ldr(r3, FieldMemOperand(r4, i));
9138    __ str(r3, FieldMemOperand(r0, i));
9139  }
9140
9141  // Setup the callee in-object property.
9142  ASSERT(Heap::arguments_callee_index == 0);
9143  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
9144  __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
9145
9146  // Get the length (smi tagged) and set that as an in-object property too.
9147  ASSERT(Heap::arguments_length_index == 1);
9148  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
9149  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
9150
9151  // If there are no actual arguments, we're done.
9152  Label done;
9153  __ cmp(r1, Operand(0));
9154  __ b(eq, &done);
9155
9156  // Get the parameters pointer from the stack.
9157  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
9158
9159  // Setup the elements pointer in the allocated arguments object and
9160  // initialize the header in the elements fixed array.
9161  __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
9162  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
9163  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
9164  __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
9165  __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
9166  __ mov(r1, Operand(r1, LSR, kSmiTagSize));  // Untag the length for the loop.
9167
9168  // Copy the fixed array slots.
9169  Label loop;
9170  // Setup r4 to point to the first array slot.
9171  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
9172  __ bind(&loop);
9173  // Pre-decrement r2 with kPointerSize on each iteration.
9174  // Pre-decrement in order to skip receiver.
9175  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
9176  // Post-increment r4 with kPointerSize on each iteration.
9177  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
9178  __ sub(r1, r1, Operand(1));
9179  __ cmp(r1, Operand(0));
9180  __ b(ne, &loop);
9181
9182  // Return and remove the on-stack parameters.
9183  __ bind(&done);
9184  __ add(sp, sp, Operand(3 * kPointerSize));
9185  __ Ret();
9186
9187  // Do the runtime call to allocate the arguments object.
9188  __ bind(&runtime);
9189  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
9190}
9191
9192
9193void RegExpExecStub::Generate(MacroAssembler* masm) {
9194  // Jump straight to the runtime system if native RegExp support is not
9195  // selected at compile time, or if the regexp entry in generated code has
9196  // been turned off by the runtime switch.
9197#ifdef V8_INTERPRETED_REGEXP
9198  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
9199#else  // V8_INTERPRETED_REGEXP
9200  if (!FLAG_regexp_entry_native) {
9201    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
9202    return;
9203  }
9204
9205  // Stack frame on entry.
9206  //  sp[0]: last_match_info (expected JSArray)
9207  //  sp[4]: previous index
9208  //  sp[8]: subject string
9209  //  sp[12]: JSRegExp object
9210
9211  static const int kLastMatchInfoOffset = 0 * kPointerSize;
9212  static const int kPreviousIndexOffset = 1 * kPointerSize;
9213  static const int kSubjectOffset = 2 * kPointerSize;
9214  static const int kJSRegExpOffset = 3 * kPointerSize;
9215
9216  Label runtime, invoke_regexp;
9217
9218  // Allocation of registers for this function. These are in callee save
9219  // registers and will be preserved by the call to the native RegExp code, as
9220  // this code is called using the normal C calling convention. When calling
9221  // directly from generated code the native RegExp code will not do a GC and
9222  // therefore the contents of these registers are safe to use after the call.
9223  Register subject = r4;
9224  Register regexp_data = r5;
9225  Register last_match_info_elements = r6;
9226
9227  // Ensure that a RegExp stack is allocated.
9228  ExternalReference address_of_regexp_stack_memory_address =
9229      ExternalReference::address_of_regexp_stack_memory_address();
9230  ExternalReference address_of_regexp_stack_memory_size =
9231      ExternalReference::address_of_regexp_stack_memory_size();
9232  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
9233  __ ldr(r0, MemOperand(r0, 0));
9234  __ tst(r0, Operand(r0));
9235  __ b(eq, &runtime);
9236
9237  // Check that the first argument is a JSRegExp object.
9238  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
9239  ASSERT_EQ(0, kSmiTag);
9240  __ tst(r0, Operand(kSmiTagMask));
9241  __ b(eq, &runtime);
9242  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
9243  __ b(ne, &runtime);
9244
9245  // Check that the RegExp has been compiled (data contains a fixed array).
9246  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
9247  if (FLAG_debug_code) {
9248    __ tst(regexp_data, Operand(kSmiTagMask));
9249    __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
9250    __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
9251    __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
9252  }
9253
9254  // regexp_data: RegExp data (FixedArray)
9255  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
9256  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
9257  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
9258  __ b(ne, &runtime);
9259
9260  // regexp_data: RegExp data (FixedArray)
9261  // Check that the number of captures fit in the static offsets vector buffer.
9262  __ ldr(r2,
9263         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
9264  // Calculate number of capture registers (number_of_captures + 1) * 2. This
9265  // uses the assumption that smis are 2 * their untagged value.
9266  ASSERT_EQ(0, kSmiTag);
9267  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
9268  __ add(r2, r2, Operand(2));  // r2 was a smi.
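  // Worked example: for number_of_captures == 3 the smi encoding in r2 is 6;
  // adding 2 yields 8 == (3 + 1) * 2, the required number of registers.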
9269  // Check that the static offsets vector buffer is large enough.
9270  __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
9271  __ b(hi, &runtime);
9272
9273  // r2: Number of capture registers
9274  // regexp_data: RegExp data (FixedArray)
9275  // Check that the second argument is a string.
9276  __ ldr(subject, MemOperand(sp, kSubjectOffset));
9277  __ tst(subject, Operand(kSmiTagMask));
9278  __ b(eq, &runtime);
9279  Condition is_string = masm->IsObjectStringType(subject, r0);
9280  __ b(NegateCondition(is_string), &runtime);
9281  // Get the length of the string to r3.
9282  __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
9283
9284  // r2: Number of capture registers
9285  // r3: Length of subject string as a smi
9286  // subject: Subject string
9287  // regexp_data: RegExp data (FixedArray)
9288  // Check that the third argument is a positive smi less than the subject
9289  // string length. A negative value will be greater (unsigned comparison).
9290  __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
9291  __ tst(r0, Operand(kSmiTagMask));
9292  __ b(ne, &runtime);
9293  __ cmp(r3, Operand(r0));
9294  __ b(ls, &runtime);
9295
9296  // r2: Number of capture registers
9297  // subject: Subject string
9298  // regexp_data: RegExp data (FixedArray)
9299  // Check that the fourth object is a JSArray object.
9300  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
9301  __ tst(r0, Operand(kSmiTagMask));
9302  __ b(eq, &runtime);
9303  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
9304  __ b(ne, &runtime);
9305  // Check that the JSArray is in fast case.
9306  __ ldr(last_match_info_elements,
9307         FieldMemOperand(r0, JSArray::kElementsOffset));
9308  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
9309  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
9310  __ cmp(r0, ip);
9311  __ b(ne, &runtime);
9312  // Check that the last match info has space for the capture registers and the
9313  // additional information.
9314  __ ldr(r0,
9315         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
9316  __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
9317  __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
9318  __ b(gt, &runtime);
9319
9320  // subject: Subject string
9321  // regexp_data: RegExp data (FixedArray)
9322  // Check the representation and encoding of the subject string.
9323  Label seq_string;
9324  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
9325  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
9326  // First check for flat string.
9327  __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
9328  ASSERT_EQ(0, kStringTag | kSeqStringTag);
9329  __ b(eq, &seq_string);
9330
9331  // subject: Subject string
9332  // regexp_data: RegExp data (FixedArray)
9333  // Check for flat cons string.
9334  // A flat cons string is a cons string where the second part is the empty
9335  // string. In that case the subject string is just the first part of the cons
9336  // string. Also in this case the first part of the cons string is known to be
9337  // a sequential string or an external string.
9338  ASSERT(kExternalStringTag != 0);
9339  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
9340  __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
9341  __ b(ne, &runtime);
9342  __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
9343  __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
9344  __ cmp(r0, r1);
9345  __ b(ne, &runtime);
9346  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
9347  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
9348  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
9349  // Is first part a flat string?
9350  ASSERT_EQ(0, kSeqStringTag);
9351  __ tst(r0, Operand(kStringRepresentationMask));
9352  __ b(nz, &runtime);
9353
9354  __ bind(&seq_string);
9355  // subject: Subject string
9356  // regexp_data: RegExp data (FixedArray)
9357  // r0: Instance type of subject string
9358  ASSERT_EQ(4, kAsciiStringTag);
9359  ASSERT_EQ(0, kTwoByteStringTag);
9360  // Find the code object based on the assumptions above.
9361  __ and_(r0, r0, Operand(kStringEncodingMask));
9362  __ mov(r3, Operand(r0, ASR, 2), SetCC);
9363  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
9364  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
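  // After the and, r0 is 4 for ASCII and 0 for two byte strings (see the
  // asserts above); the ASR by 2 turns that into 1 or 0 and sets the
  // condition flags, letting the two conditional loads above pick the code
  // object matching the encoding.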
9365
9366  // Check that the irregexp code has been generated for the actual string
9367  // encoding. If it has, the field contains a code object; otherwise it
9368  // contains the hole.
9369  __ CompareObjectType(r7, r0, r0, CODE_TYPE);
9370  __ b(ne, &runtime);
9371
9372  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
9373  // r7: code
9374  // subject: Subject string
9375  // regexp_data: RegExp data (FixedArray)
9376  // Load used arguments before starting to push arguments for call to native
9377  // RegExp code to avoid handling changing stack height.
9378  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
9379  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
9380
9381  // r1: previous index
9382  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
9383  // r7: code
9384  // subject: Subject string
9385  // regexp_data: RegExp data (FixedArray)
9386  // All checks done. Now push arguments for native regexp code.
9387  __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
9388
9389  static const int kRegExpExecuteArguments = 7;
9390  __ push(lr);
9391  __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
9392
9393  // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
9394  __ mov(r0, Operand(1));
9395  __ str(r0, MemOperand(sp, 2 * kPointerSize));
9396
9397  // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
9398  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
9399  __ ldr(r0, MemOperand(r0, 0));
9400  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
9401  __ ldr(r2, MemOperand(r2, 0));
9402  __ add(r0, r0, Operand(r2));
9403  __ str(r0, MemOperand(sp, 1 * kPointerSize));
9404
9405  // Argument 5 (sp[0]): static offsets vector buffer.
9406  __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
9407  __ str(r0, MemOperand(sp, 0 * kPointerSize));
9408
9409  // For arguments 4 and 3, get the string length, calculate the start of the
9410  // string data and the shift of the index (0 for ASCII and 1 for two byte).
9411  __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
9412  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
9413  ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize);
9414  __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
9415  __ eor(r3, r3, Operand(1));
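  // r3 was 1 for ASCII and 0 for two byte; after the eor it is the character
  // size shift (0 or 1), so (index << r3) converts character counts into
  // byte offsets in the start and end computations below.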
9416  // Argument 4 (r3): End of string data
9417  // Argument 3 (r2): Start of string data
9418  __ add(r2, r9, Operand(r1, LSL, r3));
9419  __ add(r3, r9, Operand(r0, LSL, r3));
9420
9421  // Argument 2 (r1): Previous index.
9422  // Already there
9423
9424  // Argument 1 (r0): Subject string.
9425  __ mov(r0, subject);
9426
9427  // Locate the code entry and call it.
9428  __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
9429  __ CallCFunction(r7, kRegExpExecuteArguments);
9430  __ pop(lr);
9431
9432  // r0: result
9433  // subject: subject string (callee saved)
9434  // regexp_data: RegExp data (callee saved)
9435  // last_match_info_elements: Last match info elements (callee saved)
9436
9437  // Check the result.
9438  Label success;
9439  __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
9440  __ b(eq, &success);
9441  Label failure;
9442  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
9443  __ b(eq, &failure);
9444  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
9445  // If not exception, it can only be retry. Handle that in the runtime system.
9446  __ b(ne, &runtime);
9447  // Result must now be exception. If there is no pending exception already, a
9448  // stack overflow (on the backtrack stack) was detected in RegExp code, but
9449  // the exception has not been created yet. Handle that in the runtime system.
9450  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
9451  __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
9452  __ ldr(r0, MemOperand(r0, 0));
9453  __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
9454  __ ldr(r1, MemOperand(r1, 0));
9455  __ cmp(r0, r1);
9456  __ b(eq, &runtime);
9457  __ bind(&failure);
9458  // For failure and exception return null.
9459  __ mov(r0, Operand(Factory::null_value()));
9460  __ add(sp, sp, Operand(4 * kPointerSize));
9461  __ Ret();
9462
9463  // Process the result from the native regexp code.
9464  __ bind(&success);
9465  __ ldr(r1,
9466         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
9467  // Calculate number of capture registers (number_of_captures + 1) * 2.
9468  ASSERT_EQ(0, kSmiTag);
9469  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
9470  __ add(r1, r1, Operand(2));  // r1 was a smi.
9471
9472  // r1: number of capture registers
9473  // r4: subject string
9474  // Store the capture count.
9475  __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize));  // To smi.
9476  __ str(r2, FieldMemOperand(last_match_info_elements,
9477                             RegExpImpl::kLastCaptureCountOffset));
9478  // Store last subject and last input.
9479  __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
9480  __ str(subject,
9481         FieldMemOperand(last_match_info_elements,
9482                         RegExpImpl::kLastSubjectOffset));
9483  __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
9484  __ str(subject,
9485         FieldMemOperand(last_match_info_elements,
9486                         RegExpImpl::kLastInputOffset));
9487  __ mov(r3, last_match_info_elements);
9488  __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
9489
9490  // Get the static offsets vector filled by the native regexp code.
9491  ExternalReference address_of_static_offsets_vector =
9492      ExternalReference::address_of_static_offsets_vector();
9493  __ mov(r2, Operand(address_of_static_offsets_vector));
9494
9495  // r1: number of capture registers
9496  // r2: offsets vector
9497  Label next_capture, done;
9498  // Capture register counter starts from number of capture registers and
9499  // counts down until wrapping after zero.
9500  __ add(r0,
9501         last_match_info_elements,
9502         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
9503  __ bind(&next_capture);
9504  __ sub(r1, r1, Operand(1), SetCC);
9505  __ b(mi, &done);
9506  // Read the value from the static offsets vector buffer.
9507  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
9508  // Store the smi value in the last match info.
9509  __ mov(r3, Operand(r3, LSL, kSmiTagSize));
9510  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
9511  __ jmp(&next_capture);
9512  __ bind(&done);
9513
9514  // Return last match info.
9515  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
9516  __ add(sp, sp, Operand(4 * kPointerSize));
9517  __ Ret();
9518
9519  // Do the runtime call to execute the regexp.
9520  __ bind(&runtime);
9521  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
9522#endif  // V8_INTERPRETED_REGEXP
9523}
9524
9525
9526void CallFunctionStub::Generate(MacroAssembler* masm) {
9527  Label slow;
9528
9529  // If the receiver might be a value (string, number or boolean), check for
9530  // this and box it if it is.
9531  if (ReceiverMightBeValue()) {
9532    // Get the receiver from the stack.
9533    // function, receiver [, arguments]
9534    Label receiver_is_value, receiver_is_js_object;
9535    __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
9536
9537    // Check if receiver is a smi (which is a number value).
9538    __ BranchOnSmi(r1, &receiver_is_value);
9539
9540    // Check if the receiver is a valid JS object.
9541    __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
9542    __ b(ge, &receiver_is_js_object);
9543
9544    // Call the runtime to box the value.
9545    __ bind(&receiver_is_value);
9546    __ EnterInternalFrame();
9547    __ push(r1);
9548    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
9549    __ LeaveInternalFrame();
9550    __ str(r0, MemOperand(sp, argc_ * kPointerSize));
9551
9552    __ bind(&receiver_is_js_object);
9553  }
9554
9555  // Get the function to call from the stack.
9556  // function, receiver [, arguments]
9557  __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
9558
9559  // Check that the function is really a JavaScript function.
9560  // r1: pushed function (to be verified)
9561  __ BranchOnSmi(r1, &slow);
9562  // Get the map of the function object.
9563  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
9564  __ b(ne, &slow);
9565
9566  // Fast-case: Invoke the function now.
9567  // r1: pushed function
9568  ParameterCount actual(argc_);
9569  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
9570
9571  // Slow-case: Non-function called.
9572  __ bind(&slow);
9573  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
9574  // of the original receiver from the call site).
9575  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
9576  __ mov(r0, Operand(argc_));  // Setup the number of arguments.
9577  __ mov(r2, Operand(0));
9578  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
9579  __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
9580          RelocInfo::CODE_TARGET);
9581}
9582
9583
9584// Unfortunately you have to run without snapshots to see most of these
9585// names in the profile since most compare stubs end up in the snapshot.
9586const char* CompareStub::GetName() {
9587  if (name_ != NULL) return name_;
9588  const int kMaxNameLength = 100;
9589  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
9590  if (name_ == NULL) return "OOM";
9591
9592  const char* cc_name;
9593  switch (cc_) {
9594    case lt: cc_name = "LT"; break;
9595    case gt: cc_name = "GT"; break;
9596    case le: cc_name = "LE"; break;
9597    case ge: cc_name = "GE"; break;
9598    case eq: cc_name = "EQ"; break;
9599    case ne: cc_name = "NE"; break;
9600    default: cc_name = "UnknownCondition"; break;
9601  }
9602
9603  const char* strict_name = "";
9604  if (strict_ && (cc_ == eq || cc_ == ne)) {
9605    strict_name = "_STRICT";
9606  }
9607
9608  const char* never_nan_nan_name = "";
9609  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
9610    never_nan_nan_name = "_NO_NAN";
9611  }
9612
9613  const char* include_number_compare_name = "";
9614  if (!include_number_compare_) {
9615    include_number_compare_name = "_NO_NUMBER";
9616  }
9617
9618  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
9619               "CompareStub_%s%s%s%s",
9620               cc_name,
9621               strict_name,
9622               never_nan_nan_name,
9623               include_number_compare_name);
9624  return name_;
9625}
9626
9627
9628int CompareStub::MinorKey() {
9629  // Encode the four parameters in a unique 16 bit value. To avoid duplicate
9630  // stubs the never NaN NaN condition is only taken into account if the
9631  // condition is equals.
9632  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 13));
9633  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
9634         | StrictField::encode(strict_)
9635         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
9636         | IncludeNumberCompareField::encode(include_number_compare_);
9637}
9638
9639
9640// StringCharCodeAtGenerator
9641
9642void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
9643  Label flat_string;
9644  Label ascii_string;
9645  Label got_char_code;
9646
9647  // If the receiver is a smi trigger the non-string case.
9648  __ BranchOnSmi(object_, receiver_not_string_);
9649
9650  // Fetch the instance type of the receiver into result register.
9651  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
9652  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
9653  // If the receiver is not a string trigger the non-string case.
9654  __ tst(result_, Operand(kIsNotStringMask));
9655  __ b(ne, receiver_not_string_);
9656
9657  // If the index is non-smi trigger the non-smi case.
9658  __ BranchOnNotSmi(index_, &index_not_smi_);
9659
9660  // Put smi-tagged index into scratch register.
9661  __ mov(scratch_, index_);
9662  __ bind(&got_smi_index_);
9663
9664  // Check for index out of range.
9665  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
9666  __ cmp(ip, Operand(scratch_));
9667  __ b(ls, index_out_of_range_);
9668
9669  // We need special handling for non-flat strings.
9670  ASSERT(kSeqStringTag == 0);
9671  __ tst(result_, Operand(kStringRepresentationMask));
9672  __ b(eq, &flat_string);
9673
9674  // Handle non-flat strings.
9675  __ tst(result_, Operand(kIsConsStringMask));
9676  __ b(eq, &call_runtime_);
9677
9678  // ConsString.
9679  // Check whether the right hand side is the empty string (i.e. if
9680  // this is really a flat string in a cons string). If that is not
9681  // the case we would rather go to the runtime system now to flatten
9682  // the string.
9683  __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
9684  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
9685  __ cmp(result_, Operand(ip));
9686  __ b(ne, &call_runtime_);
9687  // Get the first of the two strings and load its instance type.
9688  __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
9689  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
9690  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
9691  // If the first cons component is also non-flat, then go to runtime.
9692  ASSERT(kSeqStringTag == 0);
9693  __ tst(result_, Operand(kStringRepresentationMask));
9694  __ b(nz, &call_runtime_);
9695
9696  // Check for 1-byte or 2-byte string.
9697  __ bind(&flat_string);
9698  ASSERT(kAsciiStringTag != 0);
9699  __ tst(result_, Operand(kStringEncodingMask));
9700  __ b(nz, &ascii_string);
9701
9702  // 2-byte string.
9703  // Load the 2-byte character code into the result register. We can
9704  // add without shifting since the smi tag size is the log2 of the
9705  // number of bytes in a two-byte character.
9706  ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
9707  __ add(scratch_, object_, Operand(scratch_));
9708  __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
9709  __ jmp(&got_char_code);
9710
9711  // ASCII string.
9712  // Load the byte into the result register.
9713  __ bind(&ascii_string);
9714  __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
9715  __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
9716
9717  __ bind(&got_char_code);
9718  __ mov(result_, Operand(result_, LSL, kSmiTagSize));
9719  __ bind(&exit_);
9720}
9721
9722
9723void StringCharCodeAtGenerator::GenerateSlow(
9724    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
9725  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
9726
9727  // Index is not a smi.
9728  __ bind(&index_not_smi_);
9729  // If index is a heap number, try converting it to an integer.
9730  __ CheckMap(index_,
9731              scratch_,
9732              Heap::kHeapNumberMapRootIndex,
9733              index_not_number_,
9734              true);
9735  call_helper.BeforeCall(masm);
9736  __ Push(object_, index_);
9737  __ push(index_);  // Consumed by runtime conversion function.
9738  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
9739    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
9740  } else {
9741    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
9742    // NumberToSmi discards numbers that are not exact integers.
9743    __ CallRuntime(Runtime::kNumberToSmi, 1);
9744  }
9745  if (!scratch_.is(r0)) {
9746    // Save the conversion result before the pop instructions below
9747    // have a chance to overwrite it.
9748    __ mov(scratch_, r0);
9749  }
9750  __ pop(index_);
9751  __ pop(object_);
9752  // Reload the instance type.
9753  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
9754  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
9755  call_helper.AfterCall(masm);
9756  // If index is still not a smi, it must be out of range.
9757  __ BranchOnNotSmi(scratch_, index_out_of_range_);
9758  // Otherwise, return to the fast path.
9759  __ jmp(&got_smi_index_);
9760
9761  // Call runtime. We get here when the receiver is a string and the
9762  // index is a number, but the code of getting the actual character
9763  // is too complex (e.g., when the string needs to be flattened).
9764  __ bind(&call_runtime_);
9765  call_helper.BeforeCall(masm);
9766  __ Push(object_, index_);
9767  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
9768  if (!result_.is(r0)) {
9769    __ mov(result_, r0);
9770  }
9771  call_helper.AfterCall(masm);
9772  __ jmp(&exit_);
9773
9774  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
9775}
9776
9777
9778// -------------------------------------------------------------------------
9779// StringCharFromCodeGenerator
9780
9781void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
9782  // Fast case of Heap::LookupSingleCharacterStringFromCode.
9783  ASSERT(kSmiTag == 0);
9784  ASSERT(kSmiShiftSize == 0);
9785  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
9786  __ tst(code_,
9787         Operand(kSmiTagMask |
9788                 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
9789  __ b(nz, &slow_case_);
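  // The single tst above checks two things at once: bit 0 (the smi tag) must
  // be clear, and no bits above the smi-tagged kMaxAsciiCharCode may be set.
  // E.g. a smi-tagged code of 128 (0x100) hits the range part of the mask
  // and falls through to the slow case.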
9790
9791  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
9792  // At this point code register contains smi tagged ascii char code.
9793  ASSERT(kSmiTag == 0);
9794  __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
9795  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
9796  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
9797  __ cmp(result_, Operand(ip));
9798  __ b(eq, &slow_case_);
9799  __ bind(&exit_);
9800}
9801
9802
9803void StringCharFromCodeGenerator::GenerateSlow(
9804    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
9805  __ Abort("Unexpected fallthrough to CharFromCode slow case");
9806
9807  __ bind(&slow_case_);
9808  call_helper.BeforeCall(masm);
9809  __ push(code_);
9810  __ CallRuntime(Runtime::kCharFromCode, 1);
9811  if (!result_.is(r0)) {
9812    __ mov(result_, r0);
9813  }
9814  call_helper.AfterCall(masm);
9815  __ jmp(&exit_);
9816
9817  __ Abort("Unexpected fallthrough from CharFromCode slow case");
9818}
9819
9820
9821// -------------------------------------------------------------------------
9822// StringCharAtGenerator
9823
9824void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
9825  char_code_at_generator_.GenerateFast(masm);
9826  char_from_code_generator_.GenerateFast(masm);
9827}
9828
9829
9830void StringCharAtGenerator::GenerateSlow(
9831    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
9832  char_code_at_generator_.GenerateSlow(masm, call_helper);
9833  char_from_code_generator_.GenerateSlow(masm, call_helper);
9834}
9835
9836
9837void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
9838                                          Register dest,
9839                                          Register src,
9840                                          Register count,
9841                                          Register scratch,
9842                                          bool ascii) {
9843  Label loop;
9844  Label done;
9845  // This loop just copies one character at a time, as it is only used for very
9846  // short strings.
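  // For two byte strings the character count is doubled below so that the
  // byte-wise loop copies two bytes per character; the add also leaves the
  // flags set for the zero length check.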
9847  if (!ascii) {
9848    __ add(count, count, Operand(count), SetCC);
9849  } else {
9850    __ cmp(count, Operand(0));
9851  }
9852  __ b(eq, &done);
9853
9854  __ bind(&loop);
9855  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
9856  // Perform sub between load and dependent store to get the load time to
9857  // complete.
9858  __ sub(count, count, Operand(1), SetCC);
9859  __ strb(scratch, MemOperand(dest, 1, PostIndex));
9860  // The store above still runs on the last iteration; loop while count > 0.
9861  __ b(gt, &loop);
9862
9863  __ bind(&done);
9864}
9865
9866
9867enum CopyCharactersFlags {
9868  COPY_ASCII = 1,
9869  DEST_ALWAYS_ALIGNED = 2
9870};
9871
9872
9873void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
9874                                              Register dest,
9875                                              Register src,
9876                                              Register count,
9877                                              Register scratch1,
9878                                              Register scratch2,
9879                                              Register scratch3,
9880                                              Register scratch4,
9881                                              Register scratch5,
9882                                              int flags) {
9883  bool ascii = (flags & COPY_ASCII) != 0;
9884  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
9885
9886  if (dest_always_aligned && FLAG_debug_code) {
9887    // Check that destination is actually word aligned if the flag says
9888    // that it is.
9889    __ tst(dest, Operand(kPointerAlignmentMask));
9890    __ Check(eq, "Destination of copy not aligned.");
9891  }
9892
9893  const int kReadAlignment = 4;
9894  const int kReadAlignmentMask = kReadAlignment - 1;
9895  // Ensure that reading an entire aligned word containing the last character
9896  // of a string will not read outside the allocated area (because we pad up
9897  // to kObjectAlignment).
9898  ASSERT(kObjectAlignment >= kReadAlignment);
9899  // Assumes word reads and writes are little endian.
9900  // Nothing to do for zero characters.
9901  Label done;
9902  if (!ascii) {
9903    __ add(count, count, Operand(count), SetCC);
9904  } else {
9905    __ cmp(count, Operand(0));
9906  }
9907  __ b(eq, &done);
9908
9909  // Assume that you cannot read (or write) unaligned.
9910  Label byte_loop;
9911  // Must copy at least eight bytes, otherwise just do it one byte at a time.
9912  __ cmp(count, Operand(8));
9913  __ add(count, dest, Operand(count));
9914  Register limit = count;  // Read until src equals this.
9915  __ b(lt, &byte_loop);
9916
9917  if (!dest_always_aligned) {
9918    // Align dest by byte copying. Copies between zero and three bytes.
9919    __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
9920    Label dest_aligned;
9921    __ b(eq, &dest_aligned);
9922    __ cmp(scratch4, Operand(2));
9923    __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
9924    __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
9925    __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
9926    __ strb(scratch1, MemOperand(dest, 1, PostIndex));
9927    __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
9928    __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
9929    __ bind(&dest_aligned);
9930  }
9931
9932  Label simple_loop;
9933
9934  __ sub(scratch4, dest, Operand(src));
9935  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
9936  __ b(eq, &simple_loop);
9937  // Shift register is number of bits in a source word that
9938  // must be combined with bits in the next source word in order
9939  // to create a destination word.
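  // Example: if (dest - src) & 3 == 1, left_shift below becomes 8 and
  // right_shift 24, so each destination word combines the high 8 bits of one
  // source word with the low 24 bits of the next (little endian assumed, as
  // stated above).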
9940
9941  // Complex loop for src/dst that are not aligned the same way.
9942  {
9943    Label loop;
9944    __ mov(scratch4, Operand(scratch4, LSL, 3));
9945    Register left_shift = scratch4;
9946    __ and_(src, src, Operand(~3));  // Round down to load previous word.
9947    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
9948    // Store the "shift" most significant bits of scratch in the least
9949    // significant bits (i.e., shift down by (32 - shift)).
9950    __ rsb(scratch2, left_shift, Operand(32));
9951    Register right_shift = scratch2;
9952    __ mov(scratch1, Operand(scratch1, LSR, right_shift));
9953
9954    __ bind(&loop);
9955    __ ldr(scratch3, MemOperand(src, 4, PostIndex));
9956    __ sub(scratch5, limit, Operand(dest));
9957    __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
9958    __ str(scratch1, MemOperand(dest, 4, PostIndex));
9959    __ mov(scratch1, Operand(scratch3, LSR, right_shift));
9960    // Loop if four or more bytes left to copy.
9961    // Compare to eight, because we did the subtract before increasing dst.
9962    __ sub(scratch5, scratch5, Operand(8), SetCC);
9963    __ b(ge, &loop);
9964  }
9965  // There is now between zero and three bytes left to copy (scratch5 holds
9966  // that count minus four), and between one and three bytes already read
9967  // into scratch1 (scratch4 holds eight times that count). We may have read
9968  // past the end of the string, but because objects are aligned, we have
9969  // not read past the end of the object.
9970  // Find the minimum of remaining characters to move and preloaded characters
9971  // and write those as bytes.
9972  __ add(scratch5, scratch5, Operand(4), SetCC);
9973  __ b(eq, &done);
9974  __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
9975  // Move minimum of bytes read and bytes left to copy to scratch4.
9976  __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
9977  // Between one and three (value in scratch5) characters already read into
9978  // scratch ready to write.
9979  __ cmp(scratch5, Operand(2));
9980  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
9981  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
9982  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
9983  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
9984  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
9985  // Copy any remaining bytes.
9986  __ b(&byte_loop);
9987
9988  // Simple loop.
9989  // Copy words from src to dst, until less than four bytes left.
9990  // Both src and dest are word aligned.
9991  __ bind(&simple_loop);
9992  {
9993    Label loop;
9994    __ bind(&loop);
9995    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
9996    __ sub(scratch3, limit, Operand(dest));
9997    __ str(scratch1, MemOperand(dest, 4, PostIndex));
9998    // Compare to 8, not 4, because we do the subtraction before increasing
9999    // dest.
10000    __ cmp(scratch3, Operand(8));
10001    __ b(ge, &loop);
10002  }
10003
10004  // Copy bytes from src to dst until dst hits limit.
10005  __ bind(&byte_loop);
10006  __ cmp(dest, Operand(limit));
10007  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
10008  __ b(ge, &done);
10009  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
10010  __ b(&byte_loop);
10011
10012  __ bind(&done);
10013}
10014
10015
10016void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
10017                                                        Register c1,
10018                                                        Register c2,
10019                                                        Register scratch1,
10020                                                        Register scratch2,
10021                                                        Register scratch3,
10022                                                        Register scratch4,
10023                                                        Register scratch5,
10024                                                        Label* not_found) {
10025  // Register scratch3 is the general scratch register in this function.
10026  Register scratch = scratch3;
10027
10028  // Make sure that both characters are not digits, as such strings have a
10029  // different hash algorithm. Don't try to look for these in the symbol table.
10030  Label not_array_index;
10031  __ sub(scratch, c1, Operand(static_cast<int>('0')));
10032  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
10033  __ b(hi, &not_array_index);
10034  __ sub(scratch, c2, Operand(static_cast<int>('0')));
10035  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
10036
10037  // If the check failed, combine both characters into a single halfword.
10038  // This is required by the contract of the method: code at the
10039  // not_found branch expects this combination in the c1 register.
10040  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
10041  __ b(ls, not_found);
10042
10043  __ bind(&not_array_index);
10044  // Calculate the two character string hash.
10045  Register hash = scratch1;
10046  StringHelper::GenerateHashInit(masm, hash, c1);
10047  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
10048  StringHelper::GenerateHashGetHash(masm, hash);
10049
10050  // Collect the two characters in a register.
10051  Register chars = c1;
10052  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
10053
10054  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
10055  // hash:  hash of two character string.
10056
10057  // Load the symbol table; the untagged address of its first element is
10058  // computed further below (first_symbol_table_element).
10059  Register symbol_table = c2;
10060  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
10061
10062  // Load undefined value
10063  Register undefined = scratch4;
10064  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
10065
10066  // Calculate capacity mask from the symbol table capacity.
10067  Register mask = scratch2;
10068  __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
10069  __ mov(mask, Operand(mask, ASR, 1));
10070  __ sub(mask, mask, Operand(1));
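  // The capacity is a smi, so the ASR by 1 untags it; capacity - 1 works as
  // a probe mask because hash table capacities are powers of two.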
10071
10072  // Calculate untagged address of the first element of the symbol table.
10073  Register first_symbol_table_element = symbol_table;
10074  __ add(first_symbol_table_element, symbol_table,
10075         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
10076
10077  // Registers
10078  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
10079  // hash:  hash of two character string
10080  // mask:  capacity mask
10081  // first_symbol_table_element: address of the first element of
10082  //                             the symbol table
10083  // scratch: -
10084
10085  // Perform a number of probes in the symbol table.
10086  static const int kProbes = 4;
10087  Label found_in_symbol_table;
10088  Label next_probe[kProbes];
10089  for (int i = 0; i < kProbes; i++) {
10090    Register candidate = scratch5;  // Scratch register contains candidate.
10091
10092    // Calculate entry in symbol table.
10093    if (i > 0) {
10094      __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
10095    } else {
10096      __ mov(candidate, hash);
10097    }
10098
10099    __ and_(candidate, candidate, Operand(mask));
10100
10101    // Load the entry from the symbol table.
10102    ASSERT_EQ(1, SymbolTable::kEntrySize);
10103    __ ldr(candidate,
10104           MemOperand(first_symbol_table_element,
10105                      candidate,
10106                      LSL,
10107                      kPointerSizeLog2));
10108
10109    // If entry is undefined no string with this hash can be found.
10110    __ cmp(candidate, undefined);
10111    __ b(eq, not_found);
10112
10113    // If length is not 2 the string is not a candidate.
10114    __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
10115    __ cmp(scratch, Operand(Smi::FromInt(2)));
10116    __ b(ne, &next_probe[i]);
10117
10118    // Check that the candidate is a non-external ascii string.
10119    __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
10120    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
10121    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
10122                                              &next_probe[i]);
10123
10124    // Check if the two characters match.
10125    // Assumes that word load is little endian.
10126    __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
10127    __ cmp(chars, scratch);
10128    __ b(eq, &found_in_symbol_table);
10129    __ bind(&next_probe[i]);
10130  }
10131
10132  // No matching 2 character string found by probing.
10133  __ jmp(not_found);
10134
10135  // The candidate register (scratch5) holds the symbol when we branch here.
10136  Register result = scratch5;
10137  __ bind(&found_in_symbol_table);
10138  __ Move(r0, result);
10139}
10140
10141
10142void StringHelper::GenerateHashInit(MacroAssembler* masm,
10143                                    Register hash,
10144                                    Register character) {
10145  // hash = character + (character << 10);
10146  __ add(hash, character, Operand(character, LSL, 10));
10147  // hash ^= hash >> 6;
10148  __ eor(hash, hash, Operand(hash, ASR, 6));
10149}
10150
10151
10152void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
10153                                            Register hash,
10154                                            Register character) {
10155  // hash += character;
10156  __ add(hash, hash, Operand(character));
10157  // hash += hash << 10;
10158  __ add(hash, hash, Operand(hash, LSL, 10));
10159  // hash ^= hash >> 6;
10160  __ eor(hash, hash, Operand(hash, ASR, 6));
10161}
10162
10163
10164void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
10165                                       Register hash) {
10166  // hash += hash << 3;
10167  __ add(hash, hash, Operand(hash, LSL, 3));
10168  // hash ^= hash >> 11;
10169  __ eor(hash, hash, Operand(hash, ASR, 11));
10170  // hash += hash << 15;
10171  __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
10172
10173  // if (hash == 0) hash = 27;
10174  __ mov(hash, Operand(27), LeaveCC, eq);  // Executes only when hash == 0.
10175}
10176
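// For reference, the three hash helpers above compute the following hash
// (a minimal C-style sketch of the same arithmetic, assuming 32-bit unsigned
// overflow semantics; note that the assembly uses arithmetic, not logical,
// right shifts):
//
//   hash = c[0] + (c[0] << 10);
//   hash ^= hash >> 6;
//   for (each following character c[i]) {
//     hash += c[i]; hash += hash << 10; hash ^= hash >> 6;
//   }
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   if (hash == 0) hash = 27;  // Zero is remapped, see GenerateHashGetHash.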
10177
10178void SubStringStub::Generate(MacroAssembler* masm) {
10179  Label runtime;
10180
10181  // Stack frame on entry.
10182  //  lr: return address
10183  //  sp[0]: to
10184  //  sp[4]: from
10185  //  sp[8]: string
10186
10187  // This stub is called from the native-call %_SubString(...), so
10188  // nothing can be assumed about the arguments. It is tested that:
10189  //  "string" is a sequential string,
10190  //  both "from" and "to" are smis, and
10191  //  0 <= from <= to <= string.length.
10192  // If any of these assumptions fail, we call the runtime system.
10193
10194  static const int kToOffset = 0 * kPointerSize;
10195  static const int kFromOffset = 1 * kPointerSize;
10196  static const int kStringOffset = 2 * kPointerSize;
10197
10198
10199  // Check bounds and smi-ness.
10200  __ ldr(r7, MemOperand(sp, kToOffset));
10201  __ ldr(r6, MemOperand(sp, kFromOffset));
10202  ASSERT_EQ(0, kSmiTag);
10203  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
10204  // I.e., an arithmetic shift right by one un-smi-tags the value.
10205  __ mov(r2, Operand(r7, ASR, 1), SetCC);
10206  __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
10207  // If either r7 (to) or r6 (from) had the smi tag bit set, carry is set now.
10208  __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
10209  __ b(mi, &runtime);  // From is negative.
10210
10211  __ sub(r2, r2, Operand(r3), SetCC);
10212  __ b(mi, &runtime);  // Fail if from > to.
10213  // Special handling of sub-strings of length 1 and 2. One character strings
10214  // are handled in the runtime system (looked up in the single character
10215  // cache). Two character strings are looked up in the symbol table.
10216  __ cmp(r2, Operand(2));
10217  __ b(lt, &runtime);
10218
10219  // r2: length
10220  // r3: from index (untagged smi)
10221  // r6: from (smi)
10222  // r7: to (smi)
10223
10224  // Make sure first argument is a sequential (or flat) string.
10225  __ ldr(r5, MemOperand(sp, kStringOffset));
10226  ASSERT_EQ(0, kSmiTag);
10227  __ tst(r5, Operand(kSmiTagMask));
10228  __ b(eq, &runtime);
10229  Condition is_string = masm->IsObjectStringType(r5, r1);
10230  __ b(NegateCondition(is_string), &runtime);
10231
10232  // r1: instance type
10233  // r2: length
10234  // r3: from index (untagged smi)
10235  // r5: string
10236  // r6: from (smi)
10237  // r7: to (smi)
10238  Label seq_string;
10239  __ and_(r4, r1, Operand(kStringRepresentationMask));
10240  ASSERT(kSeqStringTag < kConsStringTag);
10241  ASSERT(kExternalStringTag > kConsStringTag);
10242  __ cmp(r4, Operand(kConsStringTag));
10243  __ b(gt, &runtime);  // External strings go to runtime.
10244  __ b(lt, &seq_string);  // Sequential strings are handled directly.
10245
10246  // Cons string. Try to recurse (once) on the first substring.
10247  // (This adds a little more generality than necessary to handle flattened
10248  // cons strings, but not much).
10249  __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
10250  __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
10251  __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
10252  __ tst(r1, Operand(kStringRepresentationMask));
10253  ASSERT_EQ(0, kSeqStringTag);
10254  __ b(ne, &runtime);  // Cons and External strings go to runtime.
10255
10256  // Definitely a sequential string.
10257  __ bind(&seq_string);
10258
10259  // r1: instance type.
10260  // r2: length
10261  // r3: from index (untagged smi)
10262  // r5: string
10263  // r6: from (smi)
10264  // r7: to (smi)
10265  __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
10266  __ cmp(r4, Operand(r7));
10267  __ b(lt, &runtime);  // Fail if to > length.
10268
10269  // r1: instance type.
10270  // r2: result string length.
10271  // r3: from index (untagged smi)
10272  // r5: string.
10273  // r6: from offset (smi)
10274  // Check for flat ascii string.
10275  Label non_ascii_flat;
10276  __ tst(r1, Operand(kStringEncodingMask));
10277  ASSERT_EQ(0, kTwoByteStringTag);
10278  __ b(eq, &non_ascii_flat);
10279
10280  Label result_longer_than_two;
10281  __ cmp(r2, Operand(2));
10282  __ b(gt, &result_longer_than_two);
10283
10284  // Sub string of length 2 requested.
10285  // Get the two characters forming the sub string.
10286  __ add(r5, r5, Operand(r3));
10287  __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
10288  __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));

  // Try to look up the two character string in the symbol table.
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  // r2: result string length.
  // r3: two characters combined into halfword in little endian byte order.
  __ bind(&make_two_character_string);
  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
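  // r3 holds char0 | (char1 << 8), which is exactly the in-memory layout of
  // a two character ascii string on a little endian target, so one halfword
  // store fills the whole string.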
  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&result_longer_than_two);

  // Allocate the result.
  __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);

  // r0: result string.
  // r2: result string length.
  // r5: string.
  // r6: from offset (smi)
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // Locate 'from' character of string.
  __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  __ add(r5, r5, Operand(r6, ASR, 1));
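  // r6 holds the from index as a smi, i.e. the index shifted left by one;
  // ASR #1 untags it to a plain byte offset, which is correct here because
  // ascii characters are one byte each.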

  // r0: result string.
  // r1: first character of result string.
  // r2: result string length.
  // r5: first character of sub string to copy.
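  // SeqAsciiString::kHeaderSize is object-aligned (asserted below), so the
  // first character of the freshly allocated result is word-aligned; as the
  // flag name suggests, DEST_ALWAYS_ALIGNED lets the copy helper rely on
  // that alignment.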
  ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                                           COPY_ASCII | DEST_ALWAYS_ALIGNED);
  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii_flat);
  // r2: result string length.
  // r5: string.
  // r6: from offset (smi)
  // The string is known to be a flat two byte string here.

  // Allocate the result.
  __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);

  // r0: result string.
  // r2: result string length.
  // r5: string.
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Locate 'from' character of string.
  __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // As "from" is a smi it is already multiplied by two, which matches the
  // size of a two byte character; e.g. from index 3 is the smi 6, exactly
  // the byte offset of character 3.
  __ add(r5, r5, Operand(r6));

  // r0: result string.
  // r1: first character of result.
  // r2: result length.
  // r5: first character of string to copy.
  ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                                           DEST_ALWAYS_ALIGNED);
  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);
}


void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Register scratch4) {
  Label compare_lengths;
  // Find minimum length and length difference.
  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
  Register length_delta = scratch3;
  __ mov(scratch1, scratch2, LeaveCC, gt);
  Register min_length = scratch1;
  ASSERT(kSmiTag == 0);
  __ tst(min_length, Operand(min_length));
  __ b(eq, &compare_lengths);

  // Untag smi.
  __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));

  // Set up registers so that we only need to increment one register
  // in the loop.
  __ add(scratch2, min_length,
         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  __ add(left, left, Operand(scratch2));
  __ add(right, right, Operand(scratch2));
  // Registers left and right now point one past the last character to
  // compare (character min_length of each string).
  __ rsb(min_length, min_length, Operand(-1));
  Register index = min_length;
  // Index starts at -min_length - 1 and is pre-incremented at the top of
  // the loop, so the first character compared is at index -min_length.
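  // Counting a single negative index up toward zero lets the add at the top
  // of the loop both advance the index and detect the end (eq) in one
  // instruction, so no separate compare against min_length is needed. For
  // min_length == 3 the loads see offsets -3, -2 and -1 from the adjusted
  // pointers.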

  {
    // Compare loop.
    Label loop;
    __ bind(&loop);
    // Compare characters; the loads are conditional on ne and are skipped
    // once the index has reached zero.
    __ add(index, index, Operand(1), SetCC);
    __ ldrb(scratch2, MemOperand(left, index), ne);
    __ ldrb(scratch4, MemOperand(right, index), ne);
    // Skip to compare lengths with eq condition true.
    __ b(eq, &compare_lengths);
    __ cmp(scratch2, scratch4);
    __ b(eq, &loop);
    // Fallthrough with eq condition false.
  }
  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
  // On the eq path the compared prefixes matched, so the (smi) length
  // difference is the result; zero means EQUAL. SetCC lets the gt/lt movs
  // below correct the result when the difference is non-zero.
  __ mov(r0, Operand(length_delta), SetCC, eq);
  // Fall through to here if characters compare not-equal.
  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
  __ Ret();
}


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  sp[0]: right string
  //  sp[4]: left string
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // left
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // right

  Label not_same;
  __ cmp(r0, r1);
  __ b(ne, &not_same);
  ASSERT_EQ(0, EQUAL);
  ASSERT_EQ(0, kSmiTag);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&not_same);

  // Check that both objects are sequential ascii strings.
  __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);

  // Compare flat ascii strings natively. The arguments are removed from the
  // stack first because GenerateCompareFlatAsciiStrings returns with Ret.
  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void StringAddStub::Generate(MacroAssembler* masm) {
  Label string_add_runtime;
  // Stack on entry:
  // sp[0]: second argument.
  // sp[4]: first argument.

  // Load the two arguments.
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.

  // Make sure that both arguments are strings if not known in advance.
  if (string_check_) {
    ASSERT_EQ(0, kSmiTag);
    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
    // Load instance types.
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
    ASSERT_EQ(0, kStringTag);
    // If either is not a string, go to runtime.
    __ tst(r4, Operand(kIsNotStringMask));
    __ tst(r5, Operand(kIsNotStringMask), eq);
    __ b(ne, &string_add_runtime);
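    // The second tst executes only if the first left eq (the first argument
    // is a string), so afterwards ne means at least one argument is not a
    // string; the two checks share a single branch.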
  }

  // Both arguments are strings.
  // r0: first string
  // r1: second string
  // r4: first string instance type (if string_check_)
  // r5: second string instance type (if string_check_)
  {
    Label strings_not_empty;
    // Check if either of the strings is empty. In that case return the other.
    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
    ASSERT(kSmiTag == 0);
    __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
    ASSERT(kSmiTag == 0);
    // Else test if second string is empty.
    __ cmp(r3, Operand(Smi::FromInt(0)), ne);
    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
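    // Conditional execution keeps this branch-free: if the first string was
    // empty, r0 already holds the second string and the second cmp was
    // skipped (the flags stay eq), so control falls through to the return;
    // if both strings are non-empty the second cmp leaves ne and the branch
    // skips the return.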

    __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
    __ add(sp, sp, Operand(2 * kPointerSize));
    __ Ret();

    __ bind(&strings_not_empty);
  }

  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
  __ mov(r3, Operand(r3, ASR, kSmiTagSize));
  // Both strings are non-empty.
  // r0: first string
  // r1: second string
  // r2: length of first string
  // r3: length of second string
  // r4: first string instance type (if string_check_)
  // r5: second string instance type (if string_check_)
  // Look at the length of the result of adding the two strings.
  Label string_add_flat_result, longer_than_two;
  // Adding two lengths can't overflow.
  ASSERT(String::kMaxLength * 2 > String::kMaxLength);
  __ add(r6, r2, Operand(r3));
  // Adding two one character strings is handled specially: try to find the
  // combined two character string in the symbol table first.
  __ cmp(r6, Operand(2));
  __ b(ne, &longer_than_two);

  // Check that both strings are non-external ascii strings.
  if (!string_check_) {
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
  }
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
                                                  &string_add_runtime);

  // Get the two characters forming the new string.
  __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
  __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));

  // Try to look up the two character string in the symbol table. If it is
  // not found just allocate a new one.
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&make_two_character_string);
  // The resulting string has length 2, and the first characters of the two
  // strings are combined into a single halfword in r2. The whole string can
  // therefore be filled with one halfword store, which assumes the processor
  // is in little endian mode.
  __ mov(r6, Operand(2));
  __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
  __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&longer_than_two);
  // Check if resulting string will be flat.
  __ cmp(r6, Operand(String::kMinNonFlatLength));
  __ b(lt, &string_add_flat_result);
  // Handle exceptionally long strings in the runtime system.
  ASSERT((String::kMaxLength & 0x80000000) == 0);
  ASSERT(IsPowerOf2(String::kMaxLength + 1));
  // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
  __ cmp(r6, Operand(String::kMaxLength + 1));
  __ b(hs, &string_add_runtime);
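  // ARM data-processing immediates are an 8-bit value rotated right by an
  // even amount, so the single set bit of kMaxLength + 1 (a power of two,
  // per the assert above) always encodes, while the all-ones pattern of
  // kMaxLength does not. Comparing unsigned (hs) against kMaxLength + 1 is
  // equivalent to checking r6 > kMaxLength.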

  // If result is not supposed to be flat, allocate a cons string object.
  // If both strings are ascii the result is an ascii cons string.
  if (!string_check_) {
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
  }
  Label non_ascii, allocated, ascii_data;
  ASSERT_EQ(0, kTwoByteStringTag);
  __ tst(r4, Operand(kStringEncodingMask));
  __ tst(r5, Operand(kStringEncodingMask), ne);
  __ b(eq, &non_ascii);
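  // Since kTwoByteStringTag is 0, eq after a tst against the encoding mask
  // means "two-byte". The second tst runs only when the first string is
  // ascii (ne), so the pair leaves eq iff at least one string is two-byte.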

  // Allocate an ASCII cons string.
  __ bind(&ascii_data);
  __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
  __ bind(&allocated);
  // Fill the fields of the cons string.
  __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
  __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
  __ mov(r0, Operand(r7));
  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii);
  // At least one of the strings is two-byte. Check whether it happens
  // to contain only ascii characters.
  // r4: first instance type.
  // r5: second instance type.
  __ tst(r4, Operand(kAsciiDataHintMask));
  __ tst(r5, Operand(kAsciiDataHintMask), ne);
  __ b(ne, &ascii_data);
  __ eor(r4, r4, Operand(r5));
  ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
  __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
  __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
  __ b(eq, &ascii_data);
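  // The first branch above is taken when both strings carry the ascii data
  // hint. After the eor, r4 holds the bits in which the two instance types
  // differ; matching kAsciiStringTag | kAsciiDataHintTag means the strings
  // differ in both the encoding bit and the hint bit, i.e. one string is
  // ascii and the other is a two-byte string flagged as containing only
  // ascii data, so an ascii cons string still suffices.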

  // Allocate a two byte cons string.
  __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
  __ jmp(&allocated);

  // Handle creating a flat result. First check that both strings are
  // sequential and that they have the same encoding.
  // r0: first string
  // r1: second string
  // r2: length of first string
  // r3: length of second string
  // r4: first string instance type (if string_check_)
  // r5: second string instance type (if string_check_)
  // r6: sum of lengths.
  __ bind(&string_add_flat_result);
  if (!string_check_) {
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
  }
  // Check that both strings are sequential.
  ASSERT_EQ(0, kSeqStringTag);
  __ tst(r4, Operand(kStringRepresentationMask));
  __ tst(r5, Operand(kStringRepresentationMask), eq);
  __ b(ne, &string_add_runtime);
  // Now check if both strings have the same encoding (ASCII/Two-byte).
  // r0: first string.
  // r1: second string.
  // r2: length of first string.
  // r3: length of second string.
  // r6: sum of lengths.
  Label non_ascii_string_add_flat_result;
  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
  __ eor(r7, r4, Operand(r5));
  __ tst(r7, Operand(kStringEncodingMask));
  __ b(ne, &string_add_runtime);
  // And see if it's ASCII or two-byte.
  __ tst(r4, Operand(kStringEncodingMask));
  __ b(eq, &non_ascii_string_add_flat_result);

  // Both strings are sequential ASCII strings. We also know that they are
  // short (since the sum of the lengths is less than kMinNonFlatLength).
  // r6: length of resulting flat string
  __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
  // Locate first character of result.
  __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // Locate first character of first argument.
  __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // r0: first character of first string.
  // r1: second string.
  // r2: length of first string.
  // r3: length of second string.
  // r6: first character of result.
  // r7: result string.
  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);

  // Locate first character of second argument.
  __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // r1: first character of second string.
  // r3: length of second string.
  // r6: next character of result.
  // r7: result string.
  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
  __ mov(r0, Operand(r7));
  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii_string_add_flat_result);
  // Both strings are sequential two byte strings.
  // r0: first string.
  // r1: second string.
  // r2: length of first string.
  // r3: length of second string.
  // r6: sum of lengths of strings.
  __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
  // r0: first string.
  // r1: second string.
  // r2: length of first string.
  // r3: length of second string.
  // r7: result string.

  // Locate first character of result.
  __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Locate first character of first argument.
  __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // r0: first character of first string.
  // r1: second string.
  // r2: length of first string.
  // r3: length of second string.
  // r6: first character of result.
  // r7: result string.
  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);

  // Locate first character of second argument.
  __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // r1: first character of second string.
  // r3: length of second string.
  // r6: next character of result (after copy of first string).
  // r7: result string.
  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);

  __ mov(r0, Operand(r7));
  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Just jump to runtime to add the two strings.
  __ bind(&string_add_runtime);
  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
