codegen-x64.cc revision 44f0eee88ff00398ff7f715fab053374d808c90d
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "register-allocator-inl.h"
#include "scopes.h"
#include "virtual-frame-inl.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

// -------------------------------------------------------------------------
// Platform-specific FrameRegisterState functions.

void FrameRegisterState::Save(MacroAssembler* masm) const {
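  // Each registers_[i] holds kPush, kIgnore, or a frame offset relative to
  // rbp, with kSyncedFlag set when the value is already synced on the stack.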
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    int action = registers_[i];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
    }
  }
}


void FrameRegisterState::Restore(MacroAssembler* masm) const {
  // Restore registers in reverse order due to the stack.
  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
    int action = registers_[i];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore) {
      action &= ~kSyncedFlag;
      __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
    }
  }
}


#undef __
#define __ ACCESS_MASM(masm_)

// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

void DeferredCode::SaveRegisters() {
  frame_state_.Save(masm_);
}


void DeferredCode::RestoreRegisters() {
  frame_state_.Restore(masm_);
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  frame_state_->Save(masm);
}


void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  frame_state_->Restore(masm);
}


void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterInternalFrame();
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveInternalFrame();
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      destination_(NULL),
      previous_(NULL) {
  owner_->set_state(this);
}


CodeGenState::CodeGenState(CodeGenerator* owner,
                           ControlDestination* destination)
    : owner_(owner),
      destination_(destination),
      previous_(owner->state()) {
  owner_->set_state(this);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


// -------------------------------------------------------------------------
// CodeGenerator implementation.

CodeGenerator::CodeGenerator(MacroAssembler* masm)
    : deferred_(8),
      masm_(masm),
      info_(NULL),
      frame_(NULL),
      allocator_(NULL),
      state_(NULL),
      loop_nesting_(0),
      function_return_is_shadowed_(false),
      in_spilled_code_(false) {
}


// Calling conventions:
// rbp: caller's frame pointer
// rsp: stack pointer
// rdi: called JS function
// rsi: callee's context

void CodeGenerator::Generate(CompilationInfo* info) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(info->function());
  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");

  // Initialize state.
  info_ = info;
  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  set_in_spilled_code(false);

  // Adjust for function-level loop nesting.
  ASSERT_EQ(0, loop_nesting_);
  loop_nesting_ = info->is_in_loop() ? 1 : 0;

  Isolate::Current()->set_jump_target_compiling_deferred_code(false);

  {
    CodeGenState state(this);
    // Entry:
    // Stack: receiver, arguments, return address.
    // rbp: caller's frame pointer
    // rsp: stack pointer
    // rdi: called JS function
    // rsi: callee's context
    allocator_->Initialize();

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
      frame_->SpillAll();
      __ int3();
    }
#endif

    frame_->Enter();

    // Allocate space for locals and initialize them.
    frame_->AllocateStackSlots();

    // Allocate the local context if needed.
    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    if (heap_slots > 0) {
      Comment cmnt(masm_, "[ allocate local context");
      // Allocate local context.
      // Get outer context and create a new context based on it.
      frame_->PushFunction();
      Result context;
      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
        FastNewContextStub stub(heap_slots);
        context = frame_->CallStub(&stub, 1);
      } else {
        context = frame_->CallRuntime(Runtime::kNewContext, 1);
      }

      // Update context local.
      frame_->SaveContextRegister();

      // Verify that the runtime call result and rsi agree.
      if (FLAG_debug_code) {
        __ cmpq(context.reg(), rsi);
        __ Assert(equal, "Runtime::NewContext should end up in rsi");
      }
    }

    // TODO(1241774): Improve this code:
    // 1) only needed if we have a context
    // 2) no need to recompute context ptr every single time
    // 3) don't copy parameter operand code from SlotOperand!
    {
      Comment cmnt2(masm_, "[ copy context parameters into .context");
      // Note that iteration order is relevant here! If we have the same
      // parameter twice (e.g., function (x, y, x)), and that parameter
      // needs to be copied into the context, it must be the last argument
      // passed to the parameter that needs to be copied. This is a rare
      // case so we don't check for it, instead we rely on the copying
      // order: such a parameter is copied repeatedly into the same
      // context location and thus the last value is what is seen inside
      // the function.
      for (int i = 0; i < scope()->num_parameters(); i++) {
        Variable* par = scope()->parameter(i);
        Slot* slot = par->AsSlot();
        if (slot != NULL && slot->type() == Slot::CONTEXT) {
          // The use of SlotOperand below is safe in unspilled code
          // because the slot is guaranteed to be a context slot.
          //
          // There are no parameters in the global scope.
          ASSERT(!scope()->is_global_scope());
          frame_->PushParameterAt(i);
          Result value = frame_->Pop();
          value.ToRegister();

          // SlotOperand loads context.reg() with the context object
          // stored to, used below in RecordWrite.
          Result context = allocator_->Allocate();
          ASSERT(context.is_valid());
          __ movq(SlotOperand(slot, context.reg()), value.reg());
          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
          Result scratch = allocator_->Allocate();
          ASSERT(scratch.is_valid());
          frame_->Spill(context.reg());
          frame_->Spill(value.reg());
          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
        }
      }
    }

    // Store the arguments object.  This must happen after context
    // initialization because the arguments object may be stored in
    // the context.
    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
      StoreArgumentsObject(true);
    }

    // Initialize ThisFunction reference if present.
    if (scope()->is_function_scope() && scope()->function() != NULL) {
      frame_->Push(FACTORY->the_hole_value());
      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
    }

    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
    function_return_is_shadowed_ = false;

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope()->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope()->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }
    CheckStack();

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope()->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatements(info->function()->body());

      // Handle the return from the function.
      if (has_valid_frame()) {
        // If there is a valid frame, control flow can fall off the end of
        // the body.  In that case there is an implicit return statement.
        ASSERT(!function_return_is_shadowed_);
        CodeForReturnPosition(info->function());
        frame_->PrepareForReturn();
        Result undefined(FACTORY->undefined_value());
        if (function_return_.is_bound()) {
          function_return_.Jump(&undefined);
        } else {
          function_return_.Bind(&undefined);
          GenerateReturnSequence(&undefined);
        }
      } else if (function_return_.is_linked()) {
        // If the return target has dangling jumps to it, then we have not
        // yet generated the return sequence.  This can happen when (a)
        // control does not flow off the end of the body so we did not
        // compile an artificial return statement just above, and (b) there
        // are return statements in the body but (c) they are all shadowed.
        Result return_value;
        function_return_.Bind(&return_value);
        GenerateReturnSequence(&return_value);
      }
    }
  }

  // Adjust for function-level loop nesting.
  ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
  loop_nesting_ = 0;

  // Code generation state must be reset.
  ASSERT(state_ == NULL);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    info->isolate()->set_jump_target_compiling_deferred_code(true);
    ProcessDeferred();
    info->isolate()->set_jump_target_compiling_deferred_code(false);
  }

  // There is no need to delete the register allocator, it is a
  // stack-allocated local.
  allocator_ = NULL;
}


Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
  // Currently, this assertion will fail if we try to assign to
  // a constant variable that is constant because it is read-only
  // (such as the variable referring to a named function expression).
  // We need to implement assignments to read-only variables.
  // Ideally, we should do this during AST generation (by converting
  // such assignments into expression statements); however, in general
  // we may not be able to make the decision until past AST generation,
  // that is when the entire program is known.
  ASSERT(slot != NULL);
  int index = slot->index();
  switch (slot->type()) {
    case Slot::PARAMETER:
      return frame_->ParameterAt(index);

    case Slot::LOCAL:
      return frame_->LocalAt(index);

    case Slot::CONTEXT: {
      // Follow the context chain if necessary.
      ASSERT(!tmp.is(rsi));  // do not overwrite context register
      Register context = rsi;
      int chain_length = scope()->ContextChainLength(slot->var()->scope());
      for (int i = 0; i < chain_length; i++) {
        // Load the closure.
        // (All contexts, even 'with' contexts, have a closure,
        // and it is the same for all contexts inside a function.
        // There is no need to go to the function context first.)
        __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
        // Load the function context (which is the incoming, outer context).
        __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
        context = tmp;
      }
      // We may have a 'with' context now. Get the function context.
      // (In fact this mov may never be needed, since the scope analysis
      // may not permit a direct context access in this case and thus we
      // are always at a function context. However it is safe to
      // dereference, because the function context of a function context
      // is itself. Before deleting this mov we should try to create a
      // counter-example first, though...)
      __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
      return ContextOperand(tmp, index);
    }

    default:
      UNREACHABLE();
      return Operand(rsp, 0);
  }
}


Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
                                                         Result tmp,
                                                         JumpTarget* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  ASSERT(tmp.is_register());
  Register context = rsi;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
                Immediate(0));
        slow->Branch(not_equal, not_taken);
      }
      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
      context = tmp.reg();
    }
  }
  // Check that last extension is NULL.
  __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
  slow->Branch(not_equal, not_taken);
  __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(tmp.reg(), slot->index());
}


// Emit code to load the value of an expression to the top of the
// frame. If the expression is boolean-valued it may be compiled (or
// partially compiled) into control flow to the control destination.
// If force_control is true, control flow is forced.
void CodeGenerator::LoadCondition(Expression* expr,
                                  ControlDestination* dest,
                                  bool force_control) {
  ASSERT(!in_spilled_code());
  int original_height = frame_->height();

  { CodeGenState new_state(this, dest);
    Visit(expr);

    // If we hit a stack overflow, we may not have actually visited
    // the expression.  In that case, we ensure that we have a
    // valid-looking frame state because we will continue to generate
    // code as we unwind the C++ stack.
    //
    // It's possible to have both a stack overflow and a valid frame
    // state (e.g., a subexpression overflowed, visiting it returned
    // with a dummied frame state, and visiting this expression
    // returned with a normal-looking state).
    if (HasStackOverflow() &&
        !dest->is_used() &&
        frame_->height() == original_height) {
      dest->Goto(true);
    }
  }

  if (force_control && !dest->is_used()) {
    // Convert the TOS value into flow to the control destination.
    ToBoolean(dest);
  }

  ASSERT(!(force_control && !dest->is_used()));
  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
}


void CodeGenerator::LoadAndSpill(Expression* expression) {
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  Load(expression);
  frame_->SpillAll();
  set_in_spilled_code(true);
}


void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  ASSERT(!in_spilled_code());
  JumpTarget true_target;
  JumpTarget false_target;
  ControlDestination dest(&true_target, &false_target, true);
  LoadCondition(expr, &dest, false);

  if (dest.false_was_fall_through()) {
    // The false target was just bound.
    JumpTarget loaded;
    frame_->Push(FACTORY->false_value());
    // There may be dangling jumps to the true target.
    if (true_target.is_linked()) {
      loaded.Jump();
      true_target.Bind();
      frame_->Push(FACTORY->true_value());
      loaded.Bind();
    }

  } else if (dest.is_used()) {
    // There is true, and possibly false, control flow (with true as
    // the fall through).
    JumpTarget loaded;
    frame_->Push(FACTORY->true_value());
    if (false_target.is_linked()) {
      loaded.Jump();
      false_target.Bind();
      frame_->Push(FACTORY->false_value());
      loaded.Bind();
    }

  } else {
    // We have a valid value on top of the frame, but we still may
    // have dangling jumps to the true and false targets from nested
    // subexpressions (e.g., the left subexpressions of the
    // short-circuited boolean operators).
    ASSERT(has_valid_frame());
    if (true_target.is_linked() || false_target.is_linked()) {
      JumpTarget loaded;
      loaded.Jump();  // Don't lose the current TOS.
      if (true_target.is_linked()) {
        true_target.Bind();
        frame_->Push(FACTORY->true_value());
        if (false_target.is_linked()) {
          loaded.Jump();
        }
      }
      if (false_target.is_linked()) {
        false_target.Bind();
        frame_->Push(FACTORY->false_value());
      }
      loaded.Bind();
    }
  }

  ASSERT(has_valid_frame());
  ASSERT(frame_->height() == original_height + 1);
}


void CodeGenerator::LoadGlobal() {
  if (in_spilled_code()) {
    frame_->EmitPush(GlobalObjectOperand());
  } else {
    Result temp = allocator_->Allocate();
    __ movq(temp.reg(), GlobalObjectOperand());
    frame_->Push(&temp);
  }
}


void CodeGenerator::LoadGlobalReceiver() {
  Result temp = allocator_->Allocate();
  Register reg = temp.reg();
  __ movq(reg, GlobalObjectOperand());
  __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
  frame_->Push(&temp);
}


void CodeGenerator::LoadTypeofExpression(Expression* expr) {
  // Special handling of identifiers as subexpressions of typeof.
  Variable* variable = expr->AsVariableProxy()->AsVariable();
  if (variable != NULL && !variable->is_this() && variable->is_global()) {
    // For a global variable we build the property reference
    // <global>.<variable> and perform a (regular non-contextual) property
    // load to make sure we do not get reference errors.
    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
    Literal key(variable->name());
    Property property(&global, &key, RelocInfo::kNoPosition);
    Reference ref(this, &property);
    ref.GetValue();
  } else if (variable != NULL && variable->AsSlot() != NULL) {
    // For a variable that rewrites to a slot, we signal it is the immediate
    // subexpression of a typeof.
    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
  } else {
    // Anything else can be handled normally.
    Load(expr);
  }
}


ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;

  // In strict mode there is no need for shadow arguments.
  ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
  // We don't want to do lazy arguments allocation for functions that
  // have heap-allocated contexts, because it interferes with the
  // uninitialized const tracking in the context objects.
  return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
      ? EAGER_ARGUMENTS_ALLOCATION
      : LAZY_ARGUMENTS_ALLOCATION;
}


Result CodeGenerator::StoreArgumentsObject(bool initial) {
  ArgumentsAllocationMode mode = ArgumentsMode();
  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);

  Comment cmnt(masm_, "[ store arguments object");
  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
    // When using lazy arguments allocation, we store the arguments marker
    // value as a sentinel indicating that the arguments object hasn't been
    // allocated yet.
    frame_->Push(FACTORY->arguments_marker());
  } else {
    ArgumentsAccessStub stub(is_strict_mode()
        ? ArgumentsAccessStub::NEW_STRICT
        : ArgumentsAccessStub::NEW_NON_STRICT);
    frame_->PushFunction();
    frame_->PushReceiverSlotAddress();
    frame_->Push(Smi::FromInt(scope()->num_parameters()));
    Result result = frame_->CallStub(&stub, 3);
    frame_->Push(&result);
  }

  Variable* arguments = scope()->arguments();
  Variable* shadow = scope()->arguments_shadow();
  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
  ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
         scope()->is_strict_mode());

  JumpTarget done;
  bool skip_arguments = false;
  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
    // We have to skip storing into the arguments slot if it has
    // already been written to. This can happen if the function
    // has a local variable named 'arguments'.
    LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
    Result probe = frame_->Pop();
    if (probe.is_constant()) {
      // We have to skip updating the arguments object if it has
      // been assigned a proper value.
      skip_arguments = !probe.handle()->IsArgumentsMarker();
    } else {
      __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
      probe.Unuse();
      done.Branch(not_equal);
    }
  }
  if (!skip_arguments) {
    StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
    if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
  }
  if (shadow != NULL) {
    StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
  }
  return frame_->Pop();
}


// -------------------------------------------------------------------------
// CodeGenerator implementation of variables, lookups, and stores.

Reference::Reference(CodeGenerator* cgen,
                     Expression* expression,
                     bool persist_after_get)
    : cgen_(cgen),
      expression_(expression),
      type_(ILLEGAL),
      persist_after_get_(persist_after_get) {
  cgen->LoadReference(this);
}


Reference::~Reference() {
  ASSERT(is_unloaded() || is_illegal());
}


void CodeGenerator::LoadReference(Reference* ref) {
  // References are loaded from both spilled and unspilled code.  Set the
  // state to unspilled to allow that (and explicitly spill after
  // construction at the construction sites).
  bool was_in_spilled_code = in_spilled_code_;
  in_spilled_code_ = false;

  Comment cmnt(masm_, "[ LoadReference");
  Expression* e = ref->expression();
  Property* property = e->AsProperty();
  Variable* var = e->AsVariableProxy()->AsVariable();

  if (property != NULL) {
    // The expression is either a property or a variable proxy that rewrites
    // to a property.
    Load(property->obj());
    if (property->key()->IsPropertyName()) {
      ref->set_type(Reference::NAMED);
    } else {
      Load(property->key());
      ref->set_type(Reference::KEYED);
    }
  } else if (var != NULL) {
    // The expression is a variable proxy that does not rewrite to a
    // property.  Global variables are treated as named property references.
    if (var->is_global()) {
      // If rax is free, the register allocator prefers it.  Thus the code
      // generator will load the global object into rax, which is where
      // LoadIC wants it.  Most uses of Reference call LoadIC directly
      // after the reference is created.
      frame_->Spill(rax);
      LoadGlobal();
      ref->set_type(Reference::NAMED);
    } else {
      ASSERT(var->AsSlot() != NULL);
      ref->set_type(Reference::SLOT);
    }
  } else {
    // Anything else is a runtime error.
    Load(e);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }

  in_spilled_code_ = was_in_spilled_code;
}


void CodeGenerator::UnloadReference(Reference* ref) {
  // Pop a reference from the stack while preserving TOS.
  Comment cmnt(masm_, "[ UnloadReference");
  frame_->Nip(ref->size());
  ref->set_unloaded();
}


// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
// convert it to a boolean in the condition code register or jump to
// 'false_target'/'true_target' as appropriate.
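// For example, ToBoolean of 0, -0, NaN, "", null, and undefined is false,
// while ToBoolean of any object (even new Boolean(false)) is true.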
void CodeGenerator::ToBoolean(ControlDestination* dest) {
  Comment cmnt(masm_, "[ ToBoolean");

  // The value to convert should be popped from the frame.
  Result value = frame_->Pop();
  value.ToRegister();

  if (value.is_number()) {
    // Fast case if TypeInfo indicates only numbers.
    if (FLAG_debug_code) {
      __ AbortIfNotNumber(value.reg());
    }
    // Smi => false iff zero.
    __ Cmp(value.reg(), Smi::FromInt(0));
    if (value.is_smi()) {
      value.Unuse();
      dest->Split(not_zero);
    } else {
      dest->false_target()->Branch(equal);
      Condition is_smi = masm_->CheckSmi(value.reg());
      dest->true_target()->Branch(is_smi);
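      // Heap number => false iff +0, -0, or NaN: ucomisd sets the zero flag
      // for both equal and unordered (NaN) comparisons, so all three reach
      // the false target in the Split below.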
      __ xorpd(xmm0, xmm0);
      __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
      value.Unuse();
      dest->Split(not_zero);
    }
  } else {
    // Fast case checks.
    // 'false' => false.
    __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
    dest->false_target()->Branch(equal);

    // 'true' => true.
    __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
    dest->true_target()->Branch(equal);

    // 'undefined' => false.
    __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
    dest->false_target()->Branch(equal);

    // Smi => false iff zero.
    __ Cmp(value.reg(), Smi::FromInt(0));
    dest->false_target()->Branch(equal);
    Condition is_smi = masm_->CheckSmi(value.reg());
    dest->true_target()->Branch(is_smi);

    // Call the stub for all other cases.
    frame_->Push(&value);  // Undo the Pop() from above.
    ToBooleanStub stub;
    Result temp = frame_->CallStub(&stub, 1);
    // Convert the result to a condition code.
    __ testq(temp.reg(), temp.reg());
    temp.Unuse();
    dest->Split(not_equal);
  }
}


// Call the specialized stub for a binary operation.
class DeferredInlineBinaryOperation: public DeferredCode {
 public:
  DeferredInlineBinaryOperation(Token::Value op,
                                Register dst,
                                Register left,
                                Register right,
                                OverwriteMode mode)
      : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
    set_comment("[ DeferredInlineBinaryOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  Register dst_;
  Register left_;
  Register right_;
  OverwriteMode mode_;
};


void DeferredInlineBinaryOperation::Generate() {
  Label done;
  if ((op_ == Token::ADD)
      || (op_ == Token::SUB)
      || (op_ == Token::MUL)
      || (op_ == Token::DIV)) {
    Label call_runtime;
    Label left_smi, right_smi, load_right, do_op;
    __ JumpIfSmi(left_, &left_smi);
    __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);
    __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
    if (mode_ == OVERWRITE_LEFT) {
      __ movq(dst_, left_);
    }
    __ jmp(&load_right);

    __ bind(&left_smi);
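    // Convert the smi to a double in xmm0, then retag it, so that the
    // runtime call below still sees the original tagged value in left_.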
    __ SmiToInteger32(left_, left_);
    __ cvtlsi2sd(xmm0, left_);
    __ Integer32ToSmi(left_, left_);
    if (mode_ == OVERWRITE_LEFT) {
      Label alloc_failure;
      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
    }

    __ bind(&load_right);
    __ JumpIfSmi(right_, &right_smi);
    __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);
    __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
    if (mode_ == OVERWRITE_RIGHT) {
      __ movq(dst_, right_);
    } else if (mode_ == NO_OVERWRITE) {
      Label alloc_failure;
      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
    }
    __ jmp(&do_op);

    __ bind(&right_smi);
    __ SmiToInteger32(right_, right_);
    __ cvtlsi2sd(xmm1, right_);
    __ Integer32ToSmi(right_, right_);
    if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
      Label alloc_failure;
      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
    }

    __ bind(&do_op);
    switch (op_) {
      case Token::ADD: __ addsd(xmm0, xmm1); break;
      case Token::SUB: __ subsd(xmm0, xmm1); break;
      case Token::MUL: __ mulsd(xmm0, xmm1); break;
      case Token::DIV: __ divsd(xmm0, xmm1); break;
      default: UNREACHABLE();
    }
    __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
    __ jmp(&done);

    __ bind(&call_runtime);
  }
  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
  stub.GenerateCall(masm_, left_, right_);
  if (!dst_.is(rax)) __ movq(dst_, rax);
  __ bind(&done);
}


static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
                                  Token::Value op,
                                  const Result& right,
                                  const Result& left) {
  // Set TypeInfo of result according to the operation performed.
  // We rely on the fact that smis have a 32 bit payload on x64.
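  // (On x64 a smi keeps its untagged 32-bit value in the upper half of
  // the 64-bit word, so any int32 result fits in a smi.)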
  STATIC_ASSERT(kSmiValueSize == 32);
  switch (op) {
    case Token::COMMA:
      return right.type_info();
    case Token::OR:
    case Token::AND:
      // Result type can be either of the two input types.
      return operands_type;
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
      // Result is always a smi.
      return TypeInfo::Smi();
    case Token::SAR:
    case Token::SHL:
      // Result is always a smi.
      return TypeInfo::Smi();
    case Token::SHR:
      // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
      return (right.is_constant() && right.handle()->IsSmi()
                     && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
          ? TypeInfo::Smi()
          : TypeInfo::Number();
    case Token::ADD:
      if (operands_type.IsNumber()) {
        return TypeInfo::Number();
      } else if (left.type_info().IsString() || right.type_info().IsString()) {
        return TypeInfo::String();
      } else {
        return TypeInfo::Unknown();
      }
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
      // Result is always a number.
      return TypeInfo::Number();
    default:
      UNREACHABLE();
  }
  UNREACHABLE();
  return TypeInfo::Unknown();
}


void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
                                           OverwriteMode overwrite_mode) {
  Comment cmnt(masm_, "[ BinaryOperation");
  Token::Value op = expr->op();
  Comment cmnt_token(masm_, Token::String(op));

  if (op == Token::COMMA) {
    // Simply discard left value.
    frame_->Nip(1);
    return;
  }

  Result right = frame_->Pop();
  Result left = frame_->Pop();

  if (op == Token::ADD) {
    const bool left_is_string = left.type_info().IsString();
    const bool right_is_string = right.type_info().IsString();
    // Make sure constant strings have string type info.
    ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
           left_is_string);
    ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
           right_is_string);
    if (left_is_string || right_is_string) {
      frame_->Push(&left);
      frame_->Push(&right);
      Result answer;
      if (left_is_string) {
        if (right_is_string) {
          StringAddStub stub(NO_STRING_CHECK_IN_STUB);
          answer = frame_->CallStub(&stub, 2);
        } else {
          answer =
            frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
        }
      } else if (right_is_string) {
        answer =
          frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
      }
      answer.set_type_info(TypeInfo::String());
      frame_->Push(&answer);
      return;
    }
    // Neither operand is known to be a string.
  }

  bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
  bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
  bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
  bool right_is_non_smi_constant =
      right.is_constant() && !right.handle()->IsSmi();

  if (left_is_smi_constant && right_is_smi_constant) {
    // Compute the constant result at compile time, and leave it on the frame.
    int left_int = Smi::cast(*left.handle())->value();
    int right_int = Smi::cast(*right.handle())->value();
    if (FoldConstantSmis(op, left_int, right_int)) return;
  }

  // Get number type of left and right sub-expressions.
  TypeInfo operands_type =
      TypeInfo::Combine(left.type_info(), right.type_info());

  TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);

  Result answer;
  if (left_is_non_smi_constant || right_is_non_smi_constant) {
    // Go straight to the slow case, with no smi code.
    GenericBinaryOpStub stub(op,
                             overwrite_mode,
                             NO_SMI_CODE_IN_STUB,
                             operands_type);
    answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
  } else if (right_is_smi_constant) {
    answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
                                        false, overwrite_mode);
  } else if (left_is_smi_constant) {
    answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
                                        true, overwrite_mode);
  } else {
    // Set the flags based on the operation, type and loop nesting level.
    // Bit operations always assume they likely operate on smis. Still only
    // generate the inline Smi check code if this operation is part of a loop.
    // For all other operations only inline the Smi check code for likely smis
    // if the operation is part of a loop.
    if (loop_nesting() > 0 &&
        (Token::IsBitOp(op) ||
         operands_type.IsInteger32() ||
         expr->type()->IsLikelySmi())) {
      answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
    } else {
      GenericBinaryOpStub stub(op,
                               overwrite_mode,
                               NO_GENERIC_BINARY_FLAGS,
                               operands_type);
      answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
    }
  }

  answer.set_type_info(result_type);
  frame_->Push(&answer);
}


bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
  Object* answer_object = HEAP->undefined_value();
  switch (op) {
    case Token::ADD:
      // Use intptr_t to detect overflow of 32-bit int.
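      // (e.g., for left == right == Smi::kMaxValue the 64-bit sum is
      // computed exactly, IsValid rejects it, and we do not fold.)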
      if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
        answer_object = Smi::FromInt(left + right);
      }
      break;
    case Token::SUB:
      // Use intptr_t to detect overflow of 32-bit int.
      if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
        answer_object = Smi::FromInt(left - right);
      }
      break;
    case Token::MUL: {
        double answer = static_cast<double>(left) * right;
        if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
          // If the product is zero and the non-zero factor is negative,
          // the spec requires us to return floating point negative zero.
          if (answer != 0 || (left >= 0 && right >= 0)) {
            answer_object = Smi::FromInt(static_cast<int>(answer));
          }
        }
      }
      break;
    case Token::DIV:
    case Token::MOD:
      break;
    case Token::BIT_OR:
      answer_object = Smi::FromInt(left | right);
      break;
    case Token::BIT_AND:
      answer_object = Smi::FromInt(left & right);
      break;
    case Token::BIT_XOR:
      answer_object = Smi::FromInt(left ^ right);
      break;

    case Token::SHL: {
        int shift_amount = right & 0x1F;
        if (Smi::IsValid(left << shift_amount)) {
          answer_object = Smi::FromInt(left << shift_amount);
        }
        break;
      }
    case Token::SHR: {
        int shift_amount = right & 0x1F;
        unsigned int unsigned_left = left;
        unsigned_left >>= shift_amount;
        if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
          answer_object = Smi::FromInt(unsigned_left);
        }
        break;
      }
    case Token::SAR: {
        int shift_amount = right & 0x1F;
        unsigned int unsigned_left = left;
        if (left < 0) {
          // Perform arithmetic shift of a negative number by
          // complementing number, logical shifting, complementing again.
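          // (e.g., left == -5, shift_amount == 1: ~(-5) == 4, 4 >> 1 == 2,
          // and ~2 == -3, which equals -5 >> 1 with sign extension.)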
1125          unsigned_left = ~unsigned_left;
1126          unsigned_left >>= shift_amount;
1127          unsigned_left = ~unsigned_left;
1128        } else {
1129          unsigned_left >>= shift_amount;
1130        }
1131        ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
1132        answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
1133        break;
1134      }
1135    default:
1136      UNREACHABLE();
1137      break;
1138  }
1139  if (answer_object->IsUndefined()) {
1140    return false;
1141  }
1142  frame_->Push(Handle<Object>(answer_object));
1143  return true;
1144}
1145
1146
1147void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
1148                                               Result* right,
1149                                               JumpTarget* both_smi) {
1150  TypeInfo left_info = left->type_info();
1151  TypeInfo right_info = right->type_info();
1152  if (left_info.IsDouble() || left_info.IsString() ||
1153      right_info.IsDouble() || right_info.IsString()) {
1154    // We know that left and right are not both smi.  Don't do any tests.
1155    return;
1156  }
1157
1158  if (left->reg().is(right->reg())) {
1159    if (!left_info.IsSmi()) {
1160      Condition is_smi = masm()->CheckSmi(left->reg());
1161      both_smi->Branch(is_smi);
1162    } else {
1163      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
1164      left->Unuse();
1165      right->Unuse();
1166      both_smi->Jump();
1167    }
1168  } else if (!left_info.IsSmi()) {
1169    if (!right_info.IsSmi()) {
1170      Condition is_smi = masm()->CheckBothSmi(left->reg(), right->reg());
1171      both_smi->Branch(is_smi);
1172    } else {
1173      Condition is_smi = masm()->CheckSmi(left->reg());
1174      both_smi->Branch(is_smi);
1175    }
1176  } else {
1177    if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
1178    if (!right_info.IsSmi()) {
1179      Condition is_smi = masm()->CheckSmi(right->reg());
1180      both_smi->Branch(is_smi);
1181    } else {
1182      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
1183      left->Unuse();
1184      right->Unuse();
1185      both_smi->Jump();
1186    }
1187  }
1188}
1189
1190
1191void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
1192                                              TypeInfo type,
1193                                              DeferredCode* deferred) {
1194  if (!type.IsSmi()) {
1195        __ JumpIfNotSmi(reg, deferred->entry_label());
1196  }
1197  if (FLAG_debug_code) {
1198    __ AbortIfNotSmi(reg);
1199  }
1200}
1201
1202
1203void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
1204                                                  Register right,
1205                                                  TypeInfo left_info,
1206                                                  TypeInfo right_info,
1207                                                  DeferredCode* deferred) {
1208  if (!left_info.IsSmi() && !right_info.IsSmi()) {
1209    __ JumpIfNotBothSmi(left, right, deferred->entry_label());
1210  } else if (!left_info.IsSmi()) {
1211    __ JumpIfNotSmi(left, deferred->entry_label());
1212  } else if (!right_info.IsSmi()) {
1213    __ JumpIfNotSmi(right, deferred->entry_label());
1214  }
1215  if (FLAG_debug_code) {
1216    __ AbortIfNotSmi(left);
1217    __ AbortIfNotSmi(right);
1218  }
1219}
1220
1221
1222// Implements a binary operation using a deferred code object and some
1223// inline code to operate on smis quickly.
1224Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
1225                                               Result* left,
1226                                               Result* right,
1227                                               OverwriteMode overwrite_mode) {
1228  // Copy the type info because left and right may be overwritten.
1229  TypeInfo left_type_info = left->type_info();
1230  TypeInfo right_type_info = right->type_info();
1231  Token::Value op = expr->op();
1232  Result answer;
1233  // Special handling of div and mod because they use fixed registers.
1234  if (op == Token::DIV || op == Token::MOD) {
1235    // We need rax as the quotient register, rdx as the remainder
1236    // register, neither left nor right in rax or rdx, and left copied
1237    // to rax.
1238    Result quotient;
1239    Result remainder;
1240    bool left_is_in_rax = false;
1241    // Step 1: get rax for quotient.
1242    if ((left->is_register() && left->reg().is(rax)) ||
1243        (right->is_register() && right->reg().is(rax))) {
1244      // One or both is in rax.  Use a fresh non-rdx register for
1245      // them.
1246      Result fresh = allocator_->Allocate();
1247      ASSERT(fresh.is_valid());
1248      if (fresh.reg().is(rdx)) {
1249        remainder = fresh;
1250        fresh = allocator_->Allocate();
1251        ASSERT(fresh.is_valid());
1252      }
1253      if (left->is_register() && left->reg().is(rax)) {
1254        quotient = *left;
1255        *left = fresh;
1256        left_is_in_rax = true;
1257      }
1258      if (right->is_register() && right->reg().is(rax)) {
1259        quotient = *right;
1260        *right = fresh;
1261      }
1262      __ movq(fresh.reg(), rax);
1263    } else {
1264      // Neither left nor right is in rax.
1265      quotient = allocator_->Allocate(rax);
1266    }
1267    ASSERT(quotient.is_register() && quotient.reg().is(rax));
1268    ASSERT(!(left->is_register() && left->reg().is(rax)));
1269    ASSERT(!(right->is_register() && right->reg().is(rax)));
1270
1271    // Step 2: get rdx for remainder if necessary.
1272    if (!remainder.is_valid()) {
1273      if ((left->is_register() && left->reg().is(rdx)) ||
1274          (right->is_register() && right->reg().is(rdx))) {
1275        Result fresh = allocator_->Allocate();
1276        ASSERT(fresh.is_valid());
1277        if (left->is_register() && left->reg().is(rdx)) {
1278          remainder = *left;
1279          *left = fresh;
1280        }
1281        if (right->is_register() && right->reg().is(rdx)) {
1282          remainder = *right;
1283          *right = fresh;
1284        }
1285        __ movq(fresh.reg(), rdx);
1286      } else {
1287        // Neither left nor right is in rdx.
1288        remainder = allocator_->Allocate(rdx);
1289      }
1290    }
1291    ASSERT(remainder.is_register() && remainder.reg().is(rdx));
1292    ASSERT(!(left->is_register() && left->reg().is(rdx)));
1293    ASSERT(!(right->is_register() && right->reg().is(rdx)));
1294
1295    left->ToRegister();
1296    right->ToRegister();
1297    frame_->Spill(rax);
1298    frame_->Spill(rdx);
1299
1300    // Check that left and right are smi tagged.
1301    DeferredInlineBinaryOperation* deferred =
1302        new DeferredInlineBinaryOperation(op,
1303                                          (op == Token::DIV) ? rax : rdx,
1304                                          left->reg(),
1305                                          right->reg(),
1306                                          overwrite_mode);
1307    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
1308                                  left_type_info, right_type_info, deferred);
1309
1310    if (op == Token::DIV) {
1311      __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
1312      deferred->BindExit();
1313      left->Unuse();
1314      right->Unuse();
1315      answer = quotient;
1316    } else {
1317      ASSERT(op == Token::MOD);
1318      __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
1319      deferred->BindExit();
1320      left->Unuse();
1321      right->Unuse();
1322      answer = remainder;
1323    }
1324    ASSERT(answer.is_valid());
1325    return answer;
1326  }
1327
1328  // Special handling of shift operations because they use fixed
1329  // registers.
1330  if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
1331    // Move left out of rcx if necessary.
1332    if (left->is_register() && left->reg().is(rcx)) {
1333      *left = allocator_->Allocate();
1334      ASSERT(left->is_valid());
1335      __ movq(left->reg(), rcx);
1336    }
1337    right->ToRegister(rcx);
1338    left->ToRegister();
1339    ASSERT(left->is_register() && !left->reg().is(rcx));
1340    ASSERT(right->is_register() && right->reg().is(rcx));
1341
1342    // We will modify right, it must be spilled.
1343    frame_->Spill(rcx);
1344
1345    // Use a fresh answer register to avoid spilling the left operand.
1346    answer = allocator_->Allocate();
1347    ASSERT(answer.is_valid());
1348    // Check that both operands are smis using the answer register as a
1349    // temporary.
1350    DeferredInlineBinaryOperation* deferred =
1351        new DeferredInlineBinaryOperation(op,
1352                                          answer.reg(),
1353                                          left->reg(),
1354                                          rcx,
1355                                          overwrite_mode);
1356
1357    Label do_op;
1358    // Left operand must be unchanged in left->reg() for deferred code.
1359    // Left operand is in answer.reg(), possibly converted to int32, for
1360    // inline code.
1361    __ movq(answer.reg(), left->reg());
1362    if (right_type_info.IsSmi()) {
1363      if (FLAG_debug_code) {
1364        __ AbortIfNotSmi(right->reg());
1365      }
1366      // If left is not known to be a smi, check if it is.
1367      // If left is not known to be a number, and it isn't a smi, check if
1368      // it is a HeapNumber.
1369      if (!left_type_info.IsSmi()) {
1370        __ JumpIfSmi(answer.reg(), &do_op);
1371        if (!left_type_info.IsNumber()) {
1372          // Branch if not a heapnumber.
1373          __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
1374                 FACTORY->heap_number_map());
1375          deferred->Branch(not_equal);
1376        }
1377        // Load integer value into answer register using truncation.
1378        __ cvttsd2si(answer.reg(),
1379                     FieldOperand(answer.reg(), HeapNumber::kValueOffset));
1380        // Branch if we might have overflowed.
1381        // (False negative for Smi::kMinValue)
1382        __ cmpl(answer.reg(), Immediate(0x80000000));
1383        deferred->Branch(equal);
1384        // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
1385        __ Integer32ToSmi(answer.reg(), answer.reg());
1386      } else {
1387        // Fast case - both are actually smis.
1388        if (FLAG_debug_code) {
1389          __ AbortIfNotSmi(left->reg());
1390        }
1391      }
1392    } else {
1393      JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
1394                                    left_type_info, right_type_info, deferred);
1395    }
1396    __ bind(&do_op);
1397
1398    // Perform the operation.
1399    switch (op) {
1400      case Token::SAR:
1401        __ SmiShiftArithmeticRight(answer.reg(), answer.reg(), rcx);
1402        break;
1403      case Token::SHR: {
1404        __ SmiShiftLogicalRight(answer.reg(),
1405                                answer.reg(),
1406                                rcx,
1407                                deferred->entry_label());
1408        break;
1409      }
1410      case Token::SHL: {
1411        __ SmiShiftLeft(answer.reg(),
1412                        answer.reg(),
1413                        rcx);
1414        break;
1415      }
1416      default:
1417        UNREACHABLE();
1418    }
1419    deferred->BindExit();
1420    left->Unuse();
1421    right->Unuse();
1422    ASSERT(answer.is_valid());
1423    return answer;
1424  }
1425
1426  // Handle the other binary operations.
1427  left->ToRegister();
1428  right->ToRegister();
1429  // A newly allocated register answer is used to hold the answer.  The
1430  // registers containing left and right are not modified so they don't
1431  // need to be spilled in the fast case.
1432  answer = allocator_->Allocate();
1433  ASSERT(answer.is_valid());
1434
1435  // Perform the smi tag check.
1436  DeferredInlineBinaryOperation* deferred =
1437      new DeferredInlineBinaryOperation(op,
1438                                        answer.reg(),
1439                                        left->reg(),
1440                                        right->reg(),
1441                                        overwrite_mode);
1442  JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
1443                                left_type_info, right_type_info, deferred);
1444
1445  switch (op) {
1446    case Token::ADD:
1447      __ SmiAdd(answer.reg(),
1448                left->reg(),
1449                right->reg(),
1450                deferred->entry_label());
1451      break;
1452
1453    case Token::SUB:
1454      __ SmiSub(answer.reg(),
1455                left->reg(),
1456                right->reg(),
1457                deferred->entry_label());
1458      break;
1459
1460    case Token::MUL: {
1461      __ SmiMul(answer.reg(),
1462                left->reg(),
1463                right->reg(),
1464                deferred->entry_label());
1465      break;
1466    }
1467
1468    case Token::BIT_OR:
1469      __ SmiOr(answer.reg(), left->reg(), right->reg());
1470      break;
1471
1472    case Token::BIT_AND:
1473      __ SmiAnd(answer.reg(), left->reg(), right->reg());
1474      break;
1475
1476    case Token::BIT_XOR:
1477      __ SmiXor(answer.reg(), left->reg(), right->reg());
1478      break;
1479
1480    default:
1481      UNREACHABLE();
1482      break;
1483  }
1484  deferred->BindExit();
1485  left->Unuse();
1486  right->Unuse();
1487  ASSERT(answer.is_valid());
1488  return answer;
1489}


// Call the appropriate binary operation stub to compute src op value
// and leave the result in dst.
class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             Register dst,
                             Register src,
                             Smi* value,
                             OverwriteMode overwrite_mode)
      : op_(op),
        dst_(dst),
        src_(src),
        value_(value),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  Register dst_;
  Register src_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


void DeferredInlineSmiOperation::Generate() {
  // For mod we don't generate all the Smi code inline.
  GenericBinaryOpStub stub(
      op_,
      overwrite_mode_,
      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
  stub.GenerateCall(masm_, src_, value_);
  if (!dst_.is(rax)) __ movq(dst_, rax);
}


// Call the appropriate binary operation stub to compute value op src
// and leave the result in dst.
class DeferredInlineSmiOperationReversed: public DeferredCode {
 public:
  DeferredInlineSmiOperationReversed(Token::Value op,
                                     Register dst,
                                     Smi* value,
                                     Register src,
                                     OverwriteMode overwrite_mode)
      : op_(op),
        dst_(dst),
        value_(value),
        src_(src),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiOperationReversed");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  Register dst_;
  Smi* value_;
  Register src_;
  OverwriteMode overwrite_mode_;
};


void DeferredInlineSmiOperationReversed::Generate() {
  GenericBinaryOpStub stub(
      op_,
      overwrite_mode_,
      NO_SMI_CODE_IN_STUB);
  stub.GenerateCall(masm_, value_, src_);
  if (!dst_.is(rax)) __ movq(dst_, rax);
}


class DeferredInlineSmiAdd: public DeferredCode {
 public:
  DeferredInlineSmiAdd(Register dst,
                       Smi* value,
                       OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiAdd");
  }

  virtual void Generate();

 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


void DeferredInlineSmiAdd::Generate() {
  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
  igostub.GenerateCall(masm_, dst_, value_);
  if (!dst_.is(rax)) __ movq(dst_, rax);
}


// The speculative addition of value + src either overflowed or src was
// not smi tagged.  Call the appropriate specialized stub for add with the
// operands reversed (value + dst).  The result is left in dst.
class DeferredInlineSmiAddReversed: public DeferredCode {
 public:
  DeferredInlineSmiAddReversed(Register dst,
                               Smi* value,
                               OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiAddReversed");
  }

  virtual void Generate();

 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


void DeferredInlineSmiAddReversed::Generate() {
  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
  igostub.GenerateCall(masm_, value_, dst_);
  if (!dst_.is(rax)) __ movq(dst_, rax);
}


class DeferredInlineSmiSub: public DeferredCode {
 public:
  DeferredInlineSmiSub(Register dst,
                       Smi* value,
                       OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiSub");
  }

  virtual void Generate();

 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


void DeferredInlineSmiSub::Generate() {
  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
  igostub.GenerateCall(masm_, dst_, value_);
  if (!dst_.is(rax)) __ movq(dst_, rax);
}


Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
                                                 Result* operand,
                                                 Handle<Object> value,
                                                 bool reversed,
                                                 OverwriteMode overwrite_mode) {
  // Generate inline code for a binary operation when one of the
  // operands is a constant smi.  Consumes the argument "operand".
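  // This covers expressions such as x + 1, x >> 2, x & 0xff and x % 4;
  // combinations that cannot benefit from the constant fall back to
  // LikelySmiBinaryOperation.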
  if (IsUnsafeSmi(value)) {
    Result unsafe_operand(value);
    if (reversed) {
      return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
                                      overwrite_mode);
    } else {
      return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
                                      overwrite_mode);
    }
  }

  // Get the literal value.
  Smi* smi_value = Smi::cast(*value);
  int int_value = smi_value->value();

  Token::Value op = expr->op();
  Result answer;
  switch (op) {
    case Token::ADD: {
      operand->ToRegister();
      frame_->Spill(operand->reg());
      DeferredCode* deferred = NULL;
      if (reversed) {
        deferred = new DeferredInlineSmiAddReversed(operand->reg(),
                                                    smi_value,
                                                    overwrite_mode);
      } else {
        deferred = new DeferredInlineSmiAdd(operand->reg(),
                                            smi_value,
                                            overwrite_mode);
      }
      JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
                                deferred);
      __ SmiAddConstant(operand->reg(),
                        operand->reg(),
                        smi_value,
                        deferred->entry_label());
      deferred->BindExit();
      answer = *operand;
      break;
    }

    case Token::SUB: {
      if (reversed) {
        Result constant_operand(value);
        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                          overwrite_mode);
      } else {
        operand->ToRegister();
        frame_->Spill(operand->reg());
        answer = *operand;
        DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
                                                          smi_value,
                                                          overwrite_mode);
        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
                                  deferred);
        // A smi currently fits in a 32-bit Immediate.
        __ SmiSubConstant(operand->reg(),
                          operand->reg(),
                          smi_value,
                          deferred->entry_label());
        deferred->BindExit();
        operand->Unuse();
      }
      break;
    }

    case Token::SAR:
      if (reversed) {
        Result constant_operand(value);
        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                          overwrite_mode);
      } else {
        // Only the least significant 5 bits of the shift value are used.
        // In the slow case, this masking is done inside the runtime call.
        int shift_value = int_value & 0x1f;
        operand->ToRegister();
        frame_->Spill(operand->reg());
        DeferredInlineSmiOperation* deferred =
            new DeferredInlineSmiOperation(op,
                                           operand->reg(),
                                           operand->reg(),
                                           smi_value,
                                           overwrite_mode);
        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
                                  deferred);
        __ SmiShiftArithmeticRightConstant(operand->reg(),
                                           operand->reg(),
                                           shift_value);
        deferred->BindExit();
        answer = *operand;
      }
      break;

    case Token::SHR:
      if (reversed) {
        Result constant_operand(value);
        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                          overwrite_mode);
      } else {
        // Only the least significant 5 bits of the shift value are used.
        // In the slow case, this masking is done inside the runtime call.
        int shift_value = int_value & 0x1f;
        operand->ToRegister();
        answer = allocator()->Allocate();
        ASSERT(answer.is_valid());
        DeferredInlineSmiOperation* deferred =
            new DeferredInlineSmiOperation(op,
                                           answer.reg(),
                                           operand->reg(),
                                           smi_value,
                                           overwrite_mode);
        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
                                  deferred);
        __ SmiShiftLogicalRightConstant(answer.reg(),
                                        operand->reg(),
                                        shift_value,
                                        deferred->entry_label());
        deferred->BindExit();
        operand->Unuse();
      }
      break;

    case Token::SHL:
      if (reversed) {
        operand->ToRegister();

        // We need rcx to be available to hold operand, and to be spilled.
        // SmiShiftLeft implicitly modifies rcx.
        if (operand->reg().is(rcx)) {
          frame_->Spill(operand->reg());
          answer = allocator()->Allocate();
        } else {
          Result rcx_reg = allocator()->Allocate(rcx);
          // answer must not be rcx.
          answer = allocator()->Allocate();
          // rcx_reg goes out of scope.
        }

        DeferredInlineSmiOperationReversed* deferred =
            new DeferredInlineSmiOperationReversed(op,
                                                   answer.reg(),
                                                   smi_value,
                                                   operand->reg(),
                                                   overwrite_mode);
        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
                                  deferred);

        __ Move(answer.reg(), smi_value);
        __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
        operand->Unuse();

        deferred->BindExit();
      } else {
        // Only the least significant 5 bits of the shift value are used.
        // In the slow case, this masking is done inside the runtime call.
        int shift_value = int_value & 0x1f;
        operand->ToRegister();
        if (shift_value == 0) {
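          // A shift by zero leaves a smi value unchanged, but the type
          // check (and, in the slow case, the conversion of the operand
          // to a number) must still be performed.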
          // Spill operand so it can be overwritten in the slow case.
          frame_->Spill(operand->reg());
          DeferredInlineSmiOperation* deferred =
              new DeferredInlineSmiOperation(op,
                                             operand->reg(),
                                             operand->reg(),
                                             smi_value,
                                             overwrite_mode);
          JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
                                    deferred);
          deferred->BindExit();
          answer = *operand;
        } else {
          // Use a fresh temporary for nonzero shift values.
          answer = allocator()->Allocate();
          ASSERT(answer.is_valid());
          DeferredInlineSmiOperation* deferred =
              new DeferredInlineSmiOperation(op,
                                             answer.reg(),
                                             operand->reg(),
                                             smi_value,
                                             overwrite_mode);
          JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
                                    deferred);
          __ SmiShiftLeftConstant(answer.reg(),
                                  operand->reg(),
                                  shift_value);
          deferred->BindExit();
          operand->Unuse();
        }
      }
      break;

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND: {
      operand->ToRegister();
      frame_->Spill(operand->reg());
      if (reversed) {
        // Bit operations with a constant smi are commutative.
        // We can swap left and right operands with no problem.
        // Swap left and right overwrite modes.  0->0, 1->2, 2->1.
        overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
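        // That is, NO_OVERWRITE (0) stays 0, while OVERWRITE_LEFT (1)
        // and OVERWRITE_RIGHT (2) trade places: (2 * 1) % 3 == 2 and
        // (2 * 2) % 3 == 1.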
      }
      DeferredCode* deferred =
          new DeferredInlineSmiOperation(op,
                                         operand->reg(),
                                         operand->reg(),
                                         smi_value,
                                         overwrite_mode);
      JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
                                deferred);
      if (op == Token::BIT_AND) {
        __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
      } else if (op == Token::BIT_XOR) {
        if (int_value != 0) {
          __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
        }
      } else {
        ASSERT(op == Token::BIT_OR);
        if (int_value != 0) {
          __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
        }
      }
      deferred->BindExit();
      answer = *operand;
      break;
    }

    // Generate inline code for mod of powers of 2 and negative powers of 2.
    case Token::MOD:
      if (!reversed &&
          int_value != 0 &&
          (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
        operand->ToRegister();
        frame_->Spill(operand->reg());
        DeferredCode* deferred =
            new DeferredInlineSmiOperation(op,
                                           operand->reg(),
                                           operand->reg(),
                                           smi_value,
                                           overwrite_mode);
        __ JumpUnlessNonNegativeSmi(operand->reg(), deferred->entry_label());
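        // For a non-negative smi x and a power of two 2^k, x % (+/-)2^k
        // equals x & (2^k - 1), since the sign of a remainder follows the
        // dividend.  Negative operands (whose result may be negative or
        // -0) are handled in the deferred code.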
        if (int_value < 0) int_value = -int_value;
        if (int_value == 1) {
          __ Move(operand->reg(), Smi::FromInt(0));
        } else {
          __ SmiAndConstant(operand->reg(),
                            operand->reg(),
                            Smi::FromInt(int_value - 1));
        }
        deferred->BindExit();
        answer = *operand;
        break;  // This break only applies if we generated code for MOD.
      }
      // Fall through if we did not find a power of 2 on the right hand side!
      // The next case must be the default.

    default: {
      Result constant_operand(value);
      if (reversed) {
        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                          overwrite_mode);
      } else {
        answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
                                          overwrite_mode);
      }
      break;
    }
  }
  ASSERT(answer.is_valid());
  return answer;
}


static bool CouldBeNaN(const Result& result) {
  if (result.type_info().IsSmi()) return false;
  if (result.type_info().IsInteger32()) return false;
  if (!result.is_constant()) return true;
  if (!result.handle()->IsHeapNumber()) return false;
  return isnan(HeapNumber::cast(*result.handle())->value());
}


// Convert from signed to unsigned comparison to match the way EFLAGS are set
// by FPU and XMM compare instructions.
static Condition DoubleCondition(Condition cc) {
  switch (cc) {
    case less:          return below;
    case equal:         return equal;
    case less_equal:    return below_equal;
    case greater:       return above;
    case greater_equal: return above_equal;
    default:            UNREACHABLE();
  }
  UNREACHABLE();
  return equal;
}


static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
                                        bool inline_number_compare) {
  CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
  if (nan_info == kCantBothBeNaN) {
    flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
  }
  if (inline_number_compare) {
    flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
  }
  return flags;
}


void CodeGenerator::Comparison(AstNode* node,
                               Condition cc,
                               bool strict,
                               ControlDestination* dest) {
  // Strict only makes sense for equality comparisons.
  ASSERT(!strict || cc == equal);

  Result left_side;
  Result right_side;
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
  if (cc == greater || cc == less_equal) {
    cc = ReverseCondition(cc);
    left_side = frame_->Pop();
    right_side = frame_->Pop();
  } else {
    right_side = frame_->Pop();
    left_side = frame_->Pop();
  }
  ASSERT(cc == less || cc == equal || cc == greater_equal);

  // If either side is a constant smi, optimize the comparison.
  bool left_side_constant_smi = false;
  bool left_side_constant_null = false;
  bool left_side_constant_1_char_string = false;
  if (left_side.is_constant()) {
    left_side_constant_smi = left_side.handle()->IsSmi();
    left_side_constant_null = left_side.handle()->IsNull();
    left_side_constant_1_char_string =
        (left_side.handle()->IsString() &&
         String::cast(*left_side.handle())->length() == 1 &&
         String::cast(*left_side.handle())->IsAsciiRepresentation());
  }
  bool right_side_constant_smi = false;
  bool right_side_constant_null = false;
  bool right_side_constant_1_char_string = false;
  if (right_side.is_constant()) {
    right_side_constant_smi = right_side.handle()->IsSmi();
    right_side_constant_null = right_side.handle()->IsNull();
    right_side_constant_1_char_string =
        (right_side.handle()->IsString() &&
         String::cast(*right_side.handle())->length() == 1 &&
         String::cast(*right_side.handle())->IsAsciiRepresentation());
  }

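  // Dispatch on the operand classification: comparison against a
  // constant smi or a constant one-character ASCII string has an
  // inlined fast path; everything else takes the generic path below.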
  if (left_side_constant_smi || right_side_constant_smi) {
    bool is_loop_condition = (node->AsExpression() != NULL) &&
        node->AsExpression()->is_loop_condition();
    ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
                          left_side_constant_smi, right_side_constant_smi,
                          is_loop_condition);
  } else if (left_side_constant_1_char_string ||
             right_side_constant_1_char_string) {
    if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
      // Trivial case, comparing two constants.
      int left_value = String::cast(*left_side.handle())->Get(0);
      int right_value = String::cast(*right_side.handle())->Get(0);
      switch (cc) {
        case less:
          dest->Goto(left_value < right_value);
          break;
        case equal:
          dest->Goto(left_value == right_value);
          break;
        case greater_equal:
          dest->Goto(left_value >= right_value);
          break;
        default:
          UNREACHABLE();
      }
    } else {
      // Only one side is a constant 1 character string.
      // If left side is a constant 1-character string, reverse the operands.
      // Since one side is a constant string, conversion order does not matter.
      if (left_side_constant_1_char_string) {
        Result temp = left_side;
        left_side = right_side;
        right_side = temp;
        cc = ReverseCondition(cc);
        // This may reintroduce greater or less_equal as the value of cc.
        // CompareStub and the inline code both support all values of cc.
      }
      // Implement comparison against a constant string, inlining the case
      // where both sides are strings.
      left_side.ToRegister();

      // Here we split control flow to the stub call and inlined cases
      // before finally splitting it to the control destination.  We use
      // a jump target and branching to duplicate the virtual frame at
      // the first split.  We manually handle the off-frame references
      // by reconstituting them on the non-fall-through path.
      JumpTarget is_not_string, is_string;
      Register left_reg = left_side.reg();
      Handle<Object> right_val = right_side.handle();
      ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
      Condition is_smi = masm()->CheckSmi(left_reg);
      is_not_string.Branch(is_smi, &left_side);
      Result temp = allocator_->Allocate();
      ASSERT(temp.is_valid());
      __ movq(temp.reg(),
              FieldOperand(left_reg, HeapObject::kMapOffset));
      __ movzxbl(temp.reg(),
                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
      // If we are testing for equality then make use of the symbol shortcut.
      // Check if the left hand side has the same type as the right hand
      // side (which is always a symbol).
      if (cc == equal) {
        Label not_a_symbol;
        STATIC_ASSERT(kSymbolTag != 0);
        // Ensure that no non-strings have the symbol bit set.
        STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
        __ testb(temp.reg(), Immediate(kIsSymbolMask));  // Test the symbol bit.
        __ j(zero, &not_a_symbol);
        // They are symbols, so do identity compare.
        __ Cmp(left_reg, right_side.handle());
        dest->true_target()->Branch(equal);
        dest->false_target()->Branch(not_equal);
        __ bind(&not_a_symbol);
      }
      // Call the compare stub if the left side is not a flat ASCII string.
      __ andb(temp.reg(),
              Immediate(kIsNotStringMask |
                        kStringRepresentationMask |
                        kStringEncodingMask));
      __ cmpb(temp.reg(),
              Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
      temp.Unuse();
      is_string.Branch(equal, &left_side);

      // Set up and call the compare stub.
      is_not_string.Bind(&left_side);
      CompareFlags flags =
          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
      CompareStub stub(cc, strict, flags);
      Result result = frame_->CallStub(&stub, &left_side, &right_side);
      result.ToRegister();
      __ testq(result.reg(), result.reg());
      result.Unuse();
      dest->true_target()->Branch(cc);
      dest->false_target()->Jump();

      is_string.Bind(&left_side);
      // left_side is a sequential ASCII string.
      ASSERT(left_side.reg().is(left_reg));
      right_side = Result(right_val);
      Result temp2 = allocator_->Allocate();
      ASSERT(temp2.is_valid());
      // Test string equality and comparison.
      if (cc == equal) {
        Label comparison_done;
        __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
                      Smi::FromInt(1));
        __ j(not_equal, &comparison_done);
        uint8_t char_value =
            static_cast<uint8_t>(String::cast(*right_val)->Get(0));
        __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
                Immediate(char_value));
        __ bind(&comparison_done);
      } else {
        __ movq(temp2.reg(),
                FieldOperand(left_side.reg(), String::kLengthOffset));
        __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
        Label comparison;
        // If the length is 0 then the subtraction gave -1 which compares less
        // than any character.
        __ j(negative, &comparison);
        // Otherwise load the first character.
        __ movzxbl(temp2.reg(),
                   FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
        __ bind(&comparison);
        // Compare the first character of the string with the
        // constant 1-character string.
        uint8_t char_value =
            static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
        __ cmpb(temp2.reg(), Immediate(char_value));
        Label characters_were_different;
        __ j(not_equal, &characters_were_different);
        // If the first character is the same then the long string sorts after
        // the short one.
        __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
                      Smi::FromInt(1));
        __ bind(&characters_were_different);
      }
      temp2.Unuse();
      left_side.Unuse();
      right_side.Unuse();
      dest->Split(cc);
    }
  } else {
    // Neither side is a constant Smi, constant 1-char string, or constant null.
    // If either side is a non-smi constant, or known to be a heap number,
    // skip the smi check.
    bool known_non_smi =
        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
        (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
        left_side.type_info().IsDouble() ||
        right_side.type_info().IsDouble();

    NaNInformation nan_info =
        (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
        kBothCouldBeNaN :
        kCantBothBeNaN;

    // Inline number comparison handling any combination of smis and heap
    // numbers if:
    //   the code is in a loop,
    //   the compare operation is different from equal, and
    //   the comparison is not a for-loop condition.
    // The reason for excluding equal is that equality will most likely be
    // tested on smis (not heap numbers), and the code for comparing smis is
    // inlined separately.  The same reasoning applies to for-loop conditions,
    // which will also most likely be smi comparisons.
    bool is_loop_condition = (node->AsExpression() != NULL)
        && node->AsExpression()->is_loop_condition();
    bool inline_number_compare =
        loop_nesting() > 0 && cc != equal && !is_loop_condition;

    // Left and right needed in registers for the following code.
    left_side.ToRegister();
    right_side.ToRegister();

    if (known_non_smi) {
      // Inlined equality check:
      // If at least one of the objects is not NaN, then if the objects
      // are identical, they are equal.
      if (nan_info == kCantBothBeNaN && cc == equal) {
        __ cmpq(left_side.reg(), right_side.reg());
        dest->true_target()->Branch(equal);
      }

      // Inlined number comparison:
      if (inline_number_compare) {
        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
      }

      // End of in-line compare, call out to the compare stub. Don't include
      // number comparison in the stub if it was inlined.
      CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
      CompareStub stub(cc, strict, flags);
      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
      __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flag.
      answer.Unuse();
      dest->Split(cc);
    } else {
      // Here we split control flow to the stub call and inlined cases
      // before finally splitting it to the control destination.  We use
      // a jump target and branching to duplicate the virtual frame at
      // the first split.  We manually handle the off-frame references
      // by reconstituting them on the non-fall-through path.
      JumpTarget is_smi;
      Register left_reg = left_side.reg();
      Register right_reg = right_side.reg();

      // In-line check for comparing two smis.
      JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
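      // Control falls through to here only if at least one operand may
      // not be a smi; the all-smi case is compiled below and bound at
      // is_smi.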

      if (has_valid_frame()) {
        // Inline the equality check if both operands can't be a NaN. If both
        // objects are the same they are equal.
        if (nan_info == kCantBothBeNaN && cc == equal) {
          __ cmpq(left_side.reg(), right_side.reg());
          dest->true_target()->Branch(equal);
        }

        // Inlined number comparison:
        if (inline_number_compare) {
          GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
        }

        // End of in-line compare, call out to the compare stub. Don't include
        // number comparison in the stub if it was inlined.
        CompareFlags flags =
            ComputeCompareFlags(nan_info, inline_number_compare);
        CompareStub stub(cc, strict, flags);
        Result answer = frame_->CallStub(&stub, &left_side, &right_side);
        __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
        answer.Unuse();
        if (is_smi.is_linked()) {
          dest->true_target()->Branch(cc);
          dest->false_target()->Jump();
        } else {
          dest->Split(cc);
        }
      }

      if (is_smi.is_linked()) {
        is_smi.Bind();
        left_side = Result(left_reg);
        right_side = Result(right_reg);
        __ SmiCompare(left_side.reg(), right_side.reg());
        right_side.Unuse();
        left_side.Unuse();
        dest->Split(cc);
      }
    }
  }
}


void CodeGenerator::ConstantSmiComparison(Condition cc,
                                          bool strict,
                                          ControlDestination* dest,
                                          Result* left_side,
                                          Result* right_side,
                                          bool left_side_constant_smi,
                                          bool right_side_constant_smi,
                                          bool is_loop_condition) {
  if (left_side_constant_smi && right_side_constant_smi) {
    // Trivial case, comparing two constants.
    int left_value = Smi::cast(*left_side->handle())->value();
    int right_value = Smi::cast(*right_side->handle())->value();
    switch (cc) {
      case less:
        dest->Goto(left_value < right_value);
        break;
      case equal:
        dest->Goto(left_value == right_value);
        break;
      case greater_equal:
        dest->Goto(left_value >= right_value);
        break;
      default:
        UNREACHABLE();
    }
  } else {
    // Only one side is a constant Smi.
    // If left side is a constant Smi, reverse the operands.
    // Since one side is a constant Smi, conversion order does not matter.
    if (left_side_constant_smi) {
      Result* temp = left_side;
      left_side = right_side;
      right_side = temp;
      cc = ReverseCondition(cc);
      // This may re-introduce greater or less_equal as the value of cc.
      // CompareStub and the inline code both support all values of cc.
    }
    // Implement comparison against a constant Smi, inlining the case
    // where both sides are smis.
    left_side->ToRegister();
    Register left_reg = left_side->reg();
    Smi* constant_smi = Smi::cast(*right_side->handle());

    if (left_side->is_smi()) {
      if (FLAG_debug_code) {
        __ AbortIfNotSmi(left_reg);
      }
      // Test smi equality and comparison by signed int comparison.
      __ SmiCompare(left_reg, constant_smi);
      left_side->Unuse();
      right_side->Unuse();
      dest->Split(cc);
    } else {
      // Only the case where the left side could possibly be a non-smi is left.
      JumpTarget is_smi;
      if (cc == equal) {
        // We can do the equality comparison before the smi check.
        __ Cmp(left_reg, constant_smi);
        dest->true_target()->Branch(equal);
        Condition left_is_smi = masm_->CheckSmi(left_reg);
        dest->false_target()->Branch(left_is_smi);
      } else {
        // Do the smi check, then the comparison.
        Condition left_is_smi = masm_->CheckSmi(left_reg);
        is_smi.Branch(left_is_smi, left_side, right_side);
      }

      // Jump or fall through to here if we are comparing a non-smi to a
      // constant smi.  If the non-smi is a heap number and this is not
      // a loop condition, inline the floating point code.
      if (!is_loop_condition) {
        // Right side is a constant smi and left side has been checked
        // not to be a smi.
        JumpTarget not_number;
        __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
               FACTORY->heap_number_map());
        not_number.Branch(not_equal, left_side);
        __ movsd(xmm1,
                 FieldOperand(left_reg, HeapNumber::kValueOffset));
        int value = constant_smi->value();
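        // xorpd is the cheapest way to materialize +0.0; other constant
        // values go through an integer register and cvtlsi2sd.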
        if (value == 0) {
          __ xorpd(xmm0, xmm0);
        } else {
          Result temp = allocator()->Allocate();
          __ movl(temp.reg(), Immediate(value));
          __ cvtlsi2sd(xmm0, temp.reg());
          temp.Unuse();
        }
        __ ucomisd(xmm1, xmm0);
        // Jump to builtin for NaN.
        not_number.Branch(parity_even, left_side);
        left_side->Unuse();
        dest->true_target()->Branch(DoubleCondition(cc));
        dest->false_target()->Jump();
        not_number.Bind(left_side);
      }

      // Set up and call the compare stub.
      CompareFlags flags =
          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
      CompareStub stub(cc, strict, flags);
      Result result = frame_->CallStub(&stub, left_side, right_side);
      result.ToRegister();
      __ testq(result.reg(), result.reg());
      result.Unuse();
      if (cc == equal) {
        dest->Split(cc);
      } else {
        dest->true_target()->Branch(cc);
        dest->false_target()->Jump();

        // It is important for performance that this case is at the end.
        is_smi.Bind(left_side, right_side);
        __ SmiCompare(left_reg, constant_smi);
        left_side->Unuse();
        right_side->Unuse();
        dest->Split(cc);
      }
    }
  }
}


// Load a comparison operand into an XMM register.  Jump to the not_numbers
// jump target, passing the left and right result, if the operand is not a
// number.
static void LoadComparisonOperand(MacroAssembler* masm_,
                                  Result* operand,
                                  XMMRegister xmm_reg,
                                  Result* left_side,
                                  Result* right_side,
                                  JumpTarget* not_numbers) {
  Label done;
  if (operand->type_info().IsDouble()) {
    // Operand is known to be a heap number, just load it.
    __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
  } else if (operand->type_info().IsSmi()) {
    // Operand is known to be a smi. Convert it to double and keep the original
    // smi.
    __ SmiToInteger32(kScratchRegister, operand->reg());
    __ cvtlsi2sd(xmm_reg, kScratchRegister);
  } else {
    // Operand type not known, check for smi or heap number.
    Label smi;
    __ JumpIfSmi(operand->reg(), &smi);
    if (!operand->type_info().IsNumber()) {
      __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
      __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
              kScratchRegister);
      not_numbers->Branch(not_equal, left_side, right_side, taken);
    }
    __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
    __ jmp(&done);

    __ bind(&smi);
    // Convert smi to double and keep the original smi.
    __ SmiToInteger32(kScratchRegister, operand->reg());
    __ cvtlsi2sd(xmm_reg, kScratchRegister);
    __ jmp(&done);
  }
  __ bind(&done);
}


void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
                                                   Result* right_side,
                                                   Condition cc,
                                                   ControlDestination* dest) {
  ASSERT(left_side->is_register());
  ASSERT(right_side->is_register());

  JumpTarget not_numbers;
  // Load the left and right operands into registers xmm0 and xmm1 and compare.
  LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
                        &not_numbers);
  LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
                        &not_numbers);
  __ ucomisd(xmm0, xmm1);
  // Bail out if a NaN is involved.
  not_numbers.Branch(parity_even, left_side, right_side);

  // Split to destination targets based on comparison.
  left_side->Unuse();
  right_side->Unuse();
  dest->true_target()->Branch(DoubleCondition(cc));
  dest->false_target()->Jump();

  not_numbers.Bind(left_side, right_side);
}


// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                      CallFunctionFlags flags,
                                      int position) {
  // Push the arguments ("left-to-right") on the stack.
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
    frame_->SpillTop();
  }

  // Record the position for debugging purposes.
  CodeForSourcePosition(position);

  // Use the shared code stub to call the function.
  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
  CallFunctionStub call_function(arg_count, in_loop, flags);
  Result answer = frame_->CallStub(&call_function, arg_count + 1);
  // Restore context and replace function on the stack with the
  // result of the stub invocation.
  frame_->RestoreContextRegister();
  frame_->SetElementAt(0, &answer);
}


void CodeGenerator::CallApplyLazy(Expression* applicand,
                                  Expression* receiver,
                                  VariableProxy* arguments,
                                  int position) {
  // An optimized implementation of expressions of the form
  // x.apply(y, arguments).
  // If the arguments object of the scope has not been allocated,
  // and x.apply is Function.prototype.apply, this optimization
  // just copies y and the arguments of the current function on the
  // stack, as receiver and arguments, and calls x.
  // In the implementation comments, we call x the applicand
  // and y the receiver.
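  // For example, the body of
  //   function f() { return g.apply(this, arguments); }
  // takes this path when the arguments object has not already been
  // materialized.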
  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
  ASSERT(arguments->IsArguments());

  // Load applicand.apply onto the stack. This will usually
  // give us a megamorphic load site. Not super, but it works.
  Load(applicand);
  frame()->Dup();
  Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
  frame()->Push(name);
  Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
  __ nop();
  frame()->Push(&answer);

  // Load the receiver and the existing arguments object onto the
  // expression stack. Avoid allocating the arguments object here.
  Load(receiver);
  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);

  // Emit the source position information after having loaded the
  // receiver and the arguments.
  CodeForSourcePosition(position);
  // Contents of frame at this point:
  // Frame[0]: arguments object of the current function or the hole.
  // Frame[1]: receiver
  // Frame[2]: applicand.apply
  // Frame[3]: applicand.

  // Check if the arguments object has been lazily allocated
  // already. If so, just use that instead of copying the arguments
  // from the stack. This also deals with cases where a local variable
  // named 'arguments' has been introduced.
  frame_->Dup();
  Result probe = frame_->Pop();
  { VirtualFrame::SpilledScope spilled_scope;
    Label slow, done;
    bool try_lazy = true;
    if (probe.is_constant()) {
      try_lazy = probe.handle()->IsArgumentsMarker();
    } else {
      __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
      probe.Unuse();
      __ j(not_equal, &slow);
    }

    if (try_lazy) {
      Label build_args;
      // Get rid of the arguments object probe.
      frame_->Drop();  // Can be called on a spilled frame.
      // Stack now has 3 elements on it.
      // Contents of stack at this point:
      // rsp[0]: receiver
      // rsp[1]: applicand.apply
      // rsp[2]: applicand.

      // Check that the receiver really is a JavaScript object.
      __ movq(rax, Operand(rsp, 0));
      Condition is_smi = masm_->CheckSmi(rax);
      __ j(is_smi, &build_args);
      // We allow all JSObjects including JSFunctions.  As long as
      // JS_FUNCTION_TYPE is the last instance type and it is right
      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
      // bound.
      STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
      STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
      __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
      __ j(below, &build_args);

      // Check that applicand.apply is Function.prototype.apply.
      __ movq(rax, Operand(rsp, kPointerSize));
      is_smi = masm_->CheckSmi(rax);
      __ j(is_smi, &build_args);
      __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
      __ j(not_equal, &build_args);
      __ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset));
      __ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
      Handle<Code> apply_code = Isolate::Current()->builtins()->FunctionApply();
      __ Cmp(rcx, apply_code);
      __ j(not_equal, &build_args);

      // Check that applicand is a function.
      __ movq(rdi, Operand(rsp, 2 * kPointerSize));
      is_smi = masm_->CheckSmi(rdi);
      __ j(is_smi, &build_args);
      __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
      __ j(not_equal, &build_args);

      // Copy the arguments to this function possibly from the
      // adaptor frame below it.
      Label invoke, adapted;
      __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
      __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset),
             Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
      __ j(equal, &adapted);

      // No arguments adaptor frame. Copy fixed number of arguments.
      __ Set(rax, scope()->num_parameters());
      for (int i = 0; i < scope()->num_parameters(); i++) {
        __ push(frame_->ParameterAt(i));
      }
      __ jmp(&invoke);

      // Arguments adaptor frame present. Copy arguments from there, but
      // limit the number of arguments copied to avoid stack overflow.
      __ bind(&adapted);
      static const uint32_t kArgumentsLimit = 1 * KB;
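      // kArgumentsLimit is 1024 (1 * KB), so at most 1024 arguments are
      // copied inline; calls with more arguments take the slow path that
      // allocates a real arguments object.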
      __ SmiToInteger32(rax,
                        Operand(rdx,
                                ArgumentsAdaptorFrameConstants::kLengthOffset));
      __ movl(rcx, rax);
      __ cmpl(rax, Immediate(kArgumentsLimit));
      __ j(above, &build_args);

      // Loop through the arguments pushing them onto the execution
      // stack. We don't inform the virtual frame of the push, so we don't
      // have to worry about getting rid of the elements from the virtual
      // frame.
      Label loop;
      // rcx is a small non-negative integer, due to the test above.
      __ testl(rcx, rcx);
      __ j(zero, &invoke);
      __ bind(&loop);
      __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
      __ decl(rcx);
      __ j(not_zero, &loop);

      // Invoke the function.
      __ bind(&invoke);
      ParameterCount actual(rax);
      __ InvokeFunction(rdi, actual, CALL_FUNCTION);
      // Drop applicand.apply and applicand from the stack, and push
      // the result of the function call, but leave the spilled frame
      // unchanged, with 3 elements, so it is correct when we compile the
      // slow-case code.
      __ addq(rsp, Immediate(2 * kPointerSize));
      __ push(rax);
      // Stack now has 1 element:
      //   rsp[0]: result
      __ jmp(&done);

      // Slow-case: Allocate the arguments object since we know it isn't
      // there, and fall-through to the slow-case where we call
      // applicand.apply.
      __ bind(&build_args);
      // Stack now has 3 elements, because we have jumped here from a point
      // where:
      // rsp[0]: receiver
      // rsp[1]: applicand.apply
      // rsp[2]: applicand.

      // StoreArgumentsObject requires a correct frame, and may modify it.
      Result arguments_object = StoreArgumentsObject(false);
      frame_->SpillAll();
      arguments_object.ToRegister();
      frame_->EmitPush(arguments_object.reg());
      arguments_object.Unuse();
      // Stack and frame now have 4 elements.
      __ bind(&slow);
    }

    // Generic computation of x.apply(y, args) with no special optimization.
    // Flip applicand.apply and applicand on the stack, so
    // applicand looks like the receiver of the applicand.apply call.
    // Then process it as a normal function call.
    __ movq(rax, Operand(rsp, 3 * kPointerSize));
    __ movq(rbx, Operand(rsp, 2 * kPointerSize));
    __ movq(Operand(rsp, 2 * kPointerSize), rax);
    __ movq(Operand(rsp, 3 * kPointerSize), rbx);

    CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
    Result res = frame_->CallStub(&call_function, 3);
    // The function and its two arguments have been dropped.
    frame_->Drop(1);  // Drop the receiver as well.
    res.ToRegister();
    frame_->EmitPush(res.reg());
    // Stack now has 1 element:
    //   rsp[0]: result
    if (try_lazy) __ bind(&done);
  }  // End of spilled scope.
  // Restore the context register after a call.
  frame_->RestoreContextRegister();
}


class DeferredStackCheck: public DeferredCode {
 public:
  DeferredStackCheck() {
    set_comment("[ DeferredStackCheck");
  }

  virtual void Generate();
};


void DeferredStackCheck::Generate() {
  StackCheckStub stub;
  __ CallStub(&stub);
}


void CodeGenerator::CheckStack() {
  DeferredStackCheck* deferred = new DeferredStackCheck;
  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
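  // The stack limit root is also lowered by the stack guard to request
  // an interruption, so this check doubles as the interrupt check.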
  deferred->Branch(below);
  deferred->BindExit();
}


void CodeGenerator::VisitAndSpill(Statement* statement) {
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  Visit(statement);
  if (frame_ != NULL) {
    frame_->SpillAll();
  }
  set_in_spilled_code(true);
}


void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  VisitStatements(statements);
  if (frame_ != NULL) {
    frame_->SpillAll();
  }
  set_in_spilled_code(true);

  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  ASSERT(!in_spilled_code());
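  // A statement ending in an unconditional control transfer (return,
  // break, continue, throw) invalidates the frame; any statements that
  // follow are unreachable and are not compiled.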
  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
    Visit(statements->at(i));
  }
  ASSERT(!has_valid_frame() || frame_->height() == original_height);
}


void CodeGenerator::VisitBlock(Block* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ Block");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  VisitStatements(node->statements());
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
}


void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  // Call the runtime to declare the globals.  The inevitable call
  // will sync frame elements to memory anyway, so we do it eagerly to
  // allow us to push the arguments directly into place.
  frame_->SyncRange(0, frame_->element_count() - 1);

  __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
  frame_->EmitPush(rsi);  // The context is the first argument.
  frame_->EmitPush(kScratchRegister);
  frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
  frame_->EmitPush(Smi::FromInt(strict_mode_flag()));
  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
  // Return value is ignored.
}


void CodeGenerator::VisitDeclaration(Declaration* node) {
  Comment cmnt(masm_, "[ Declaration");
  Variable* var = node->proxy()->var();
  ASSERT(var != NULL);  // must have been resolved
  Slot* slot = var->AsSlot();

  // If it was not possible to allocate the variable at compile time,
  // we need to "declare" it at runtime to make sure it actually
  // exists in the local context.
  if (slot != NULL && slot->type() == Slot::LOOKUP) {
    // Variables with a "LOOKUP" slot were introduced as non-locals
    // during variable resolution and must have mode DYNAMIC.
    ASSERT(var->is_dynamic());
    // For now, just do a runtime call.  Sync the virtual frame eagerly
    // so we can simply push the arguments into place.
    frame_->SyncRange(0, frame_->element_count() - 1);
    frame_->EmitPush(rsi);
    __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
    frame_->EmitPush(kScratchRegister);
    // Declaration nodes are always introduced in one of two modes.
    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    frame_->EmitPush(Smi::FromInt(attr));
    // Push initial value, if any.
    // Note: For variables we must not push an initial value (such as
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
      frame_->EmitPush(Heap::kTheHoleValueRootIndex);
    } else if (node->fun() != NULL) {
      Load(node->fun());
    } else {
      frame_->EmitPush(Smi::FromInt(0));  // no initial value!
    }
    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
    // Ignore the return value (declarations are statements).
    return;
  }

  ASSERT(!var->is_global());

  // If we have a function or a constant, we need to initialize the variable.
  Expression* val = NULL;
  if (node->mode() == Variable::CONST) {
    val = new Literal(FACTORY->the_hole_value());
  } else {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    {
      // Set the initial value.
      Reference target(this, node->proxy());
      Load(val);
      target.SetValue(NOT_CONST_INIT);
      // The reference is removed from the stack (preserving TOS) when
      // it goes out of scope.
    }
    // Get rid of the assigned value (declarations are statements).
    frame_->Drop();
  }
}


void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ExpressionStatement");
  CodeForStatementPosition(node);
  Expression* expression = node->expression();
  expression->MarkAsStatement();
  Load(expression);
  // Remove the lingering expression result from the top of stack.
  frame_->Drop();
}


void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "// EmptyStatement");
  CodeForStatementPosition(node);
  // nothing to do
}


void CodeGenerator::VisitIfStatement(IfStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ IfStatement");
  // Generate different code depending on which parts of the if statement
  // are present or not.
  bool has_then_stm = node->HasThenStatement();
  bool has_else_stm = node->HasElseStatement();

  CodeForStatementPosition(node);
  JumpTarget exit;
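  // Four cases are handled below: both then and else present, then only,
  // else only, and neither (where the condition is evaluated purely for
  // its side effects).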
  if (has_then_stm && has_else_stm) {
    JumpTarget then;
    JumpTarget else_;
    ControlDestination dest(&then, &else_, true);
    LoadCondition(node->condition(), &dest, true);

    if (dest.false_was_fall_through()) {
      // The else target was bound, so we compile the else part first.
      Visit(node->else_statement());

      // We may have dangling jumps to the then part.
      if (then.is_linked()) {
        if (has_valid_frame()) exit.Jump();
        then.Bind();
        Visit(node->then_statement());
      }
    } else {
      // The then target was bound, so we compile the then part first.
      Visit(node->then_statement());

      if (else_.is_linked()) {
        if (has_valid_frame()) exit.Jump();
        else_.Bind();
        Visit(node->else_statement());
      }
    }

  } else if (has_then_stm) {
    ASSERT(!has_else_stm);
    JumpTarget then;
    ControlDestination dest(&then, &exit, true);
    LoadCondition(node->condition(), &dest, true);

    if (dest.false_was_fall_through()) {
      // The exit label was bound.  We may have dangling jumps to the
      // then part.
      if (then.is_linked()) {
        exit.Unuse();
        exit.Jump();
        then.Bind();
        Visit(node->then_statement());
      }
    } else {
      // The then label was bound.
      Visit(node->then_statement());
    }

  } else if (has_else_stm) {
    ASSERT(!has_then_stm);
    JumpTarget else_;
    ControlDestination dest(&exit, &else_, false);
    LoadCondition(node->condition(), &dest, true);

    if (dest.true_was_fall_through()) {
      // The exit label was bound.  We may have dangling jumps to the
      // else part.
      if (else_.is_linked()) {
        exit.Unuse();
        exit.Jump();
        else_.Bind();
        Visit(node->else_statement());
      }
    } else {
      // The else label was bound.
      Visit(node->else_statement());
    }

  } else {
    ASSERT(!has_then_stm && !has_else_stm);
    // We only care about the condition's side effects (not its value
    // or control flow effect).  LoadCondition is called without
    // forcing control flow.
    ControlDestination dest(&exit, &exit, true);
    LoadCondition(node->condition(), &dest, false);
    if (!dest.is_used()) {
      // We got a value on the frame rather than (or in addition to)
      // control flow.
      frame_->Drop();
    }
  }

  if (exit.is_linked()) {
    exit.Bind();
  }
}


void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ContinueStatement");
  CodeForStatementPosition(node);
  node->target()->continue_target()->Jump();
}


void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ BreakStatement");
  CodeForStatementPosition(node);
  node->target()->break_target()->Jump();
}


void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ReturnStatement");

  CodeForStatementPosition(node);
  Load(node->expression());
  Result return_value = frame_->Pop();
  masm()->positions_recorder()->WriteRecordedPositions();
2968  if (function_return_is_shadowed_) {
2969    function_return_.Jump(&return_value);
2970  } else {
2971    frame_->PrepareForReturn();
2972    if (function_return_.is_bound()) {
2973      // If the function return label is already bound we reuse the
2974      // code by jumping to the return site.
2975      function_return_.Jump(&return_value);
2976    } else {
2977      function_return_.Bind(&return_value);
2978      GenerateReturnSequence(&return_value);
2979    }
2980  }
2981}
2982
2983
2984void CodeGenerator::GenerateReturnSequence(Result* return_value) {
2985  // The return value is a live (but not currently reference counted)
2986  // reference to rax.  This is safe because the current frame does not
2987  // contain a reference to rax (it is prepared for the return by spilling
2988  // all registers).
2989  if (FLAG_trace) {
2990    frame_->Push(return_value);
2991    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
2992  }
2993  return_value->ToRegister(rax);
2994
2995  // Add a label for checking the size of the code used for returning.
2996#ifdef DEBUG
2997  Label check_exit_codesize;
2998  masm_->bind(&check_exit_codesize);
2999#endif
3000
3001  // Leave the frame and return popping the arguments and the
3002  // receiver.
3003  frame_->Exit();
3004  int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
3005  __ Ret(arguments_bytes, rcx);
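  // Worked example (illustrative): for a function declared with two
  // parameters, arguments_bytes = (2 + 1) * kPointerSize = 24 bytes on
  // x64, so the return drops the receiver as well as both arguments.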
3006  DeleteFrame();
3007
3008#ifdef ENABLE_DEBUGGER_SUPPORT
3009  // Add padding that will be overwritten by a debugger breakpoint.
3010  // The shortest return sequence generated is "movq rsp, rbp; pop rbp; ret k"
3011  // with length 7 (3 + 1 + 3).
3012  const int kPadding = Assembler::kJSReturnSequenceLength - 7;
3013  for (int i = 0; i < kPadding; ++i) {
3014    masm_->int3();
3015  }
3016  // Check that the size of the code used for returning is large enough
3017  // for the debugger's requirements.
3018  ASSERT(Assembler::kJSReturnSequenceLength <=
3019         masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
3020#endif
3021}
3022
3023
3024void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
3025  ASSERT(!in_spilled_code());
3026  Comment cmnt(masm_, "[ WithEnterStatement");
3027  CodeForStatementPosition(node);
3028  Load(node->expression());
3029  Result context;
3030  if (node->is_catch_block()) {
3031    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
3032  } else {
3033    context = frame_->CallRuntime(Runtime::kPushContext, 1);
3034  }
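  // Illustrative example: entering "with (o) { ... }" pushes a new
  // context whose extension object is 'o'; catch blocks reuse the same
  // machinery through Runtime::kPushCatchContext above.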
3035
3036  // Update context local.
3037  frame_->SaveContextRegister();
3038
3039  // Verify that the runtime call result and rsi agree.
3040  if (FLAG_debug_code) {
3041    __ cmpq(context.reg(), rsi);
3042    __ Assert(equal, "Runtime::NewContext should end up in rsi");
3043  }
3044}
3045
3046
3047void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
3048  ASSERT(!in_spilled_code());
3049  Comment cmnt(masm_, "[ WithExitStatement");
3050  CodeForStatementPosition(node);
3051  // Pop context.
3052  __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
3053  // Update context local.
3054  frame_->SaveContextRegister();
3055}
3056
3057
3058void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
3059  ASSERT(!in_spilled_code());
3060  Comment cmnt(masm_, "[ SwitchStatement");
3061  CodeForStatementPosition(node);
3062  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3063
3064  // Compile the switch value.
3065  Load(node->tag());
3066
3067  ZoneList<CaseClause*>* cases = node->cases();
3068  int length = cases->length();
3069  CaseClause* default_clause = NULL;
3070
3071  JumpTarget next_test;
3072  // Compile the case label expressions and comparisons.  Exit early
3073  // if a comparison is unconditionally true.  The target next_test is
3074  // bound before the loop in order to indicate control flow to the
3075  // first comparison.
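  // Illustrative sketch: "switch (x) { case a: A; case b: B; }" is
  // compiled as a chain of strict-equality tests, roughly
  //   if (x === a) goto A; if (x === b) goto B; goto break;
  // with the duplicated switch value dropped on entry to each body.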
3076  next_test.Bind();
3077  for (int i = 0; i < length && !next_test.is_unused(); i++) {
3078    CaseClause* clause = cases->at(i);
3079    // The default is not a test, but remember it for later.
3080    if (clause->is_default()) {
3081      default_clause = clause;
3082      continue;
3083    }
3084
3085    Comment cmnt(masm_, "[ Case comparison");
3086    // We recycle the same target next_test for each test.  Bind it if
3087    // the previous test has not done so and then unuse it for the
3088    // loop.
3089    if (next_test.is_linked()) {
3090      next_test.Bind();
3091    }
3092    next_test.Unuse();
3093
3094    // Duplicate the switch value.
3095    frame_->Dup();
3096
3097    // Compile the label expression.
3098    Load(clause->label());
3099
3100    // Compare and branch to the body if true or the next test if
3101    // false.  Prefer the next test as a fall through.
3102    ControlDestination dest(clause->body_target(), &next_test, false);
3103    Comparison(node, equal, true, &dest);
3104
3105    // If the comparison fell through to the true target, jump to the
3106    // actual body.
3107    if (dest.true_was_fall_through()) {
3108      clause->body_target()->Unuse();
3109      clause->body_target()->Jump();
3110    }
3111  }
3112
3113  // If there was control flow to a next test from the last one
3114  // compiled, compile a jump to the default or break target.
3115  if (!next_test.is_unused()) {
3116    if (next_test.is_linked()) {
3117      next_test.Bind();
3118    }
3119    // Drop the switch value.
3120    frame_->Drop();
3121    if (default_clause != NULL) {
3122      default_clause->body_target()->Jump();
3123    } else {
3124      node->break_target()->Jump();
3125    }
3126  }
3127
3128  // The last instruction emitted was a jump, either to the default
3129  // clause or the break target, or else to a case body from the loop
3130  // that compiles the tests.
3131  ASSERT(!has_valid_frame());
3132  // Compile case bodies as needed.
3133  for (int i = 0; i < length; i++) {
3134    CaseClause* clause = cases->at(i);
3135
3136    // There are two ways to reach the body: from the corresponding
3137    // test or as the fall through of the previous body.
3138    if (clause->body_target()->is_linked() || has_valid_frame()) {
3139      if (clause->body_target()->is_linked()) {
3140        if (has_valid_frame()) {
3141          // If we have both a jump to the test and a fall through, put
3142          // a jump on the fall through path to avoid the dropping of
3143          // the switch value on the test path.  The exception is the
3144          // default which has already had the switch value dropped.
3145          if (clause->is_default()) {
3146            clause->body_target()->Bind();
3147          } else {
3148            JumpTarget body;
3149            body.Jump();
3150            clause->body_target()->Bind();
3151            frame_->Drop();
3152            body.Bind();
3153          }
3154        } else {
3155          // No fall through to worry about.
3156          clause->body_target()->Bind();
3157          if (!clause->is_default()) {
3158            frame_->Drop();
3159          }
3160        }
3161      } else {
3162        // Otherwise, we have only fall through.
3163        ASSERT(has_valid_frame());
3164      }
3165
3166      // We are now prepared to compile the body.
3167      Comment cmnt(masm_, "[ Case body");
3168      VisitStatements(clause->statements());
3169    }
3170    clause->body_target()->Unuse();
3171  }
3172
3173  // We may not have a valid frame here so bind the break target only
3174  // if needed.
3175  if (node->break_target()->is_linked()) {
3176    node->break_target()->Bind();
3177  }
3178  node->break_target()->Unuse();
3179}
3180
3181
3182void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
3183  ASSERT(!in_spilled_code());
3184  Comment cmnt(masm_, "[ DoWhileStatement");
3185  CodeForStatementPosition(node);
3186  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3187  JumpTarget body(JumpTarget::BIDIRECTIONAL);
3188  IncrementLoopNesting();
3189
3190  ConditionAnalysis info = AnalyzeCondition(node->cond());
3191  // Label the top of the loop for the backward jump if necessary.
3192  switch (info) {
3193    case ALWAYS_TRUE:
3194      // Use the continue target.
3195      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
3196      node->continue_target()->Bind();
3197      break;
3198    case ALWAYS_FALSE:
3199      // No need to label it.
3200      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3201      break;
3202    case DONT_KNOW:
3203      // Continue is the test, so use the backward body target.
3204      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3205      body.Bind();
3206      break;
3207  }
3208
3209  CheckStack();  // TODO(1222600): ignore if body contains calls.
3210  Visit(node->body());
3211
3212  // Compile the test.
3213  switch (info) {
3214    case ALWAYS_TRUE:
3215      // If control flow can fall off the end of the body, jump back
3216      // to the top and bind the break target at the exit.
3217      if (has_valid_frame()) {
3218        node->continue_target()->Jump();
3219      }
3220      if (node->break_target()->is_linked()) {
3221        node->break_target()->Bind();
3222      }
3223      break;
3224    case ALWAYS_FALSE:
3225      // We may have had continues or breaks in the body.
3226      if (node->continue_target()->is_linked()) {
3227        node->continue_target()->Bind();
3228      }
3229      if (node->break_target()->is_linked()) {
3230        node->break_target()->Bind();
3231      }
3232      break;
3233    case DONT_KNOW:
3234      // We have to compile the test expression if it can be reached by
3235      // control flow falling out of the body or via continue.
3236      if (node->continue_target()->is_linked()) {
3237        node->continue_target()->Bind();
3238      }
3239      if (has_valid_frame()) {
3240        Comment cmnt(masm_, "[ DoWhileCondition");
3241        CodeForDoWhileConditionPosition(node);
3242        ControlDestination dest(&body, node->break_target(), false);
3243        LoadCondition(node->cond(), &dest, true);
3244      }
3245      if (node->break_target()->is_linked()) {
3246        node->break_target()->Bind();
3247      }
3248      break;
3249  }
3250
3251  DecrementLoopNesting();
3252  node->continue_target()->Unuse();
3253  node->break_target()->Unuse();
3254}
3255
3256
3257void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
3258  ASSERT(!in_spilled_code());
3259  Comment cmnt(masm_, "[ WhileStatement");
3260  CodeForStatementPosition(node);
3261
3262  // If the condition is always false and has no side effects, we do not
3263  // need to compile anything.
3264  ConditionAnalysis info = AnalyzeCondition(node->cond());
3265  if (info == ALWAYS_FALSE) return;
3266
3267  // Do not duplicate conditions that may have function literal
3268  // subexpressions.  This can cause us to compile the function literal
3269  // twice.
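  // Illustrative example: if the condition contains a function literal,
  // e.g. "while ((f = function () {})()) body", the test is compiled
  // once at the top and the backward edge jumps to it; otherwise it is
  // compiled a second time at the bottom to save a jump per iteration.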
3270  bool test_at_bottom = !node->may_have_function_literal();
3271  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3272  IncrementLoopNesting();
3273  JumpTarget body;
3274  if (test_at_bottom) {
3275    body.set_direction(JumpTarget::BIDIRECTIONAL);
3276  }
3277
3278  // Based on the condition analysis, compile the test as necessary.
3279  switch (info) {
3280    case ALWAYS_TRUE:
3281      // We will not compile the test expression.  Label the top of the
3282      // loop with the continue target.
3283      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
3284      node->continue_target()->Bind();
3285      break;
3286    case DONT_KNOW: {
3287      if (test_at_bottom) {
3288        // Continue is the test at the bottom, no need to label the test
3289        // at the top.  The body is a backward target.
3290        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3291      } else {
3292        // Label the test at the top as the continue target.  The body
3293        // is a forward-only target.
3294        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
3295        node->continue_target()->Bind();
3296      }
3297      // Compile the test with the body as the true target and preferred
3298      // fall-through and with the break target as the false target.
3299      ControlDestination dest(&body, node->break_target(), true);
3300      LoadCondition(node->cond(), &dest, true);
3301
3302      if (dest.false_was_fall_through()) {
3303        // If we got the break target as fall-through, the test may have
3304        // been unconditionally false (if there are no jumps to the
3305        // body).
3306        if (!body.is_linked()) {
3307          DecrementLoopNesting();
3308          return;
3309        }
3310
3311        // Otherwise, jump around the body on the fall through and then
3312        // bind the body target.
3313        node->break_target()->Unuse();
3314        node->break_target()->Jump();
3315        body.Bind();
3316      }
3317      break;
3318    }
3319    case ALWAYS_FALSE:
3320      UNREACHABLE();
3321      break;
3322  }
3323
3324  CheckStack();  // TODO(1222600): ignore if body contains calls.
3325  Visit(node->body());
3326
3327  // Based on the condition analysis, compile the backward jump as
3328  // necessary.
3329  switch (info) {
3330    case ALWAYS_TRUE:
3331      // The loop body has been labeled with the continue target.
3332      if (has_valid_frame()) {
3333        node->continue_target()->Jump();
3334      }
3335      break;
3336    case DONT_KNOW:
3337      if (test_at_bottom) {
3338        // If we have chosen to recompile the test at the bottom,
3339        // then it is the continue target.
3340        if (node->continue_target()->is_linked()) {
3341          node->continue_target()->Bind();
3342        }
3343        if (has_valid_frame()) {
3344          // The break target is the fall-through (body is a backward
3345          // jump from here and thus an invalid fall-through).
3346          ControlDestination dest(&body, node->break_target(), false);
3347          LoadCondition(node->cond(), &dest, true);
3348        }
3349      } else {
3350        // If we have chosen not to recompile the test at the bottom,
3351        // jump back to the one at the top.
3352        if (has_valid_frame()) {
3353          node->continue_target()->Jump();
3354        }
3355      }
3356      break;
3357    case ALWAYS_FALSE:
3358      UNREACHABLE();
3359      break;
3360  }
3361
3362  // The break target may be already bound (by the condition), or there
3363  // may not be a valid frame.  Bind it only if needed.
3364  if (node->break_target()->is_linked()) {
3365    node->break_target()->Bind();
3366  }
3367  DecrementLoopNesting();
3368}
3369
3370
3371void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
3372  ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
3373  if (slot->type() == Slot::LOCAL) {
3374    frame_->SetTypeForLocalAt(slot->index(), info);
3375  } else {
3376    frame_->SetTypeForParamAt(slot->index(), info);
3377  }
3378  if (FLAG_debug_code && info.IsSmi()) {
3379    if (slot->type() == Slot::LOCAL) {
3380      frame_->PushLocalAt(slot->index());
3381    } else {
3382      frame_->PushParameterAt(slot->index());
3383    }
3384    Result var = frame_->Pop();
3385    var.ToRegister();
3386    __ AbortIfNotSmi(var.reg());
3387  }
3388}
3389
3390
3391void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
3392  // A fast smi loop is a for loop with an initializer
3393  // that is a simple assignment of a smi to a stack variable,
3394  // a test that is a simple comparison of that variable against a smi
3395  // constant, and a step that is an increment/decrement of the variable,
3396  // where the variable isn't modified in the loop body.
3397  // This guarantees that the variable is always a smi.
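  // Illustrative example (assumed shape): a loop such as
  //   for (var i = 0; i < 100; i++) { sum += i; }
  // fits this pattern, provided 'i' is a stack-allocated local that the
  // body never assigns.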
3398
3399  Variable* loop_var = node->loop_variable();
3400  Smi* initial_value = *Handle<Smi>::cast(node->init()
3401      ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
3402  Smi* limit_value = *Handle<Smi>::cast(
3403      node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
3404  Token::Value compare_op =
3405      node->cond()->AsCompareOperation()->op();
3406  bool increments =
3407      node->next()->StatementAsCountOperation()->op() == Token::INC;
3408
3409  // Check that the condition isn't initially false.
3410  bool initially_false = false;
3411  int initial_int_value = initial_value->value();
3412  int limit_int_value = limit_value->value();
3413  switch (compare_op) {
3414    case Token::LT:
3415      initially_false = initial_int_value >= limit_int_value;
3416      break;
3417    case Token::LTE:
3418      initially_false = initial_int_value > limit_int_value;
3419      break;
3420    case Token::GT:
3421      initially_false = initial_int_value <= limit_int_value;
3422      break;
3423    case Token::GTE:
3424      initially_false = initial_int_value < limit_int_value;
3425      break;
3426    default:
3427      UNREACHABLE();
3428  }
3429  if (initially_false) return;
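  // Worked example: "for (var i = 10; i < 5; i++) ..." gives
  // initial_int_value = 10 >= limit_int_value = 5 under Token::LT, so
  // GenerateFastSmiLoop returns here without emitting any code.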
3430
3431  // Only check the loop condition at the end.
3432
3433  Visit(node->init());
3434
3435  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
3436  // Set the direction of the loop's break and continue targets.
3437  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3438  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3439
3440  IncrementLoopNesting();
3441  loop.Bind();
3442
3443  CheckStack();  // TODO(1222600): ignore if body contains calls.
3444
3445  // Set the number type of the loop variable to smi.
3446  SetTypeForStackSlot(loop_var->AsSlot(), TypeInfo::Smi());
3447  Visit(node->body());
3448
3449  if (node->continue_target()->is_linked()) {
3450    node->continue_target()->Bind();
3451  }
3452
3453  if (has_valid_frame()) {
3454    CodeForStatementPosition(node);
3455    Slot* loop_var_slot = loop_var->AsSlot();
3456    if (loop_var_slot->type() == Slot::LOCAL) {
3457      frame_->TakeLocalAt(loop_var_slot->index());
3458    } else {
3459      ASSERT(loop_var_slot->type() == Slot::PARAMETER);
3460      frame_->TakeParameterAt(loop_var_slot->index());
3461    }
3462    Result loop_var_result = frame_->Pop();
3463    if (!loop_var_result.is_register()) {
3464      loop_var_result.ToRegister();
3465    }
3466    Register loop_var_reg = loop_var_result.reg();
3467    frame_->Spill(loop_var_reg);
3468    if (increments) {
3469      __ SmiAddConstant(loop_var_reg,
3470                        loop_var_reg,
3471                        Smi::FromInt(1));
3472    } else {
3473      __ SmiSubConstant(loop_var_reg,
3474                        loop_var_reg,
3475                        Smi::FromInt(1));
3476    }
3477
3478    frame_->Push(&loop_var_result);
3479    if (loop_var_slot->type() == Slot::LOCAL) {
3480      frame_->StoreToLocalAt(loop_var_slot->index());
3481    } else {
3482      ASSERT(loop_var_slot->type() == Slot::PARAMETER);
3483      frame_->StoreToParameterAt(loop_var_slot->index());
3484    }
3485    frame_->Drop();
3486
3487    __ SmiCompare(loop_var_reg, limit_value);
3488    Condition condition;
3489    switch (compare_op) {
3490      case Token::LT:
3491        condition = less;
3492        break;
3493      case Token::LTE:
3494        condition = less_equal;
3495        break;
3496      case Token::GT:
3497        condition = greater;
3498        break;
3499      case Token::GTE:
3500        condition = greater_equal;
3501        break;
3502      default:
3503        condition = never;
3504        UNREACHABLE();
3505    }
3506    loop.Branch(condition);
3507  }
3508  if (node->break_target()->is_linked()) {
3509    node->break_target()->Bind();
3510  }
3511  DecrementLoopNesting();
3512}
3513
3514
3515void CodeGenerator::VisitForStatement(ForStatement* node) {
3516  ASSERT(!in_spilled_code());
3517  Comment cmnt(masm_, "[ ForStatement");
3518  CodeForStatementPosition(node);
3519
3520  if (node->is_fast_smi_loop()) {
3521    GenerateFastSmiLoop(node);
3522    return;
3523  }
3524
3525  // Compile the init expression if present.
3526  if (node->init() != NULL) {
3527    Visit(node->init());
3528  }
3529
3530  // If the condition is always false and has no side effects, we do not
3531  // need to compile anything else.
3532  ConditionAnalysis info = AnalyzeCondition(node->cond());
3533  if (info == ALWAYS_FALSE) return;
3534
3535  // Do not duplicate conditions that may have function literal
3536  // subexpressions.  This can cause us to compile the function literal
3537  // twice.
3538  bool test_at_bottom = !node->may_have_function_literal();
3539  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3540  IncrementLoopNesting();
3541
3542  // Target for backward edge if no test at the bottom, otherwise
3543  // unused.
3544  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
3545
3546  // Target for backward edge if there is a test at the bottom,
3547  // otherwise used as target for test at the top.
3548  JumpTarget body;
3549  if (test_at_bottom) {
3550    body.set_direction(JumpTarget::BIDIRECTIONAL);
3551  }
3552
3553  // Based on the condition analysis, compile the test as necessary.
3554  switch (info) {
3555    case ALWAYS_TRUE:
3556      // We will not compile the test expression.  Label the top of the
3557      // loop.
3558      if (node->next() == NULL) {
3559        // Use the continue target if there is no update expression.
3560        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
3561        node->continue_target()->Bind();
3562      } else {
3563        // Otherwise use the backward loop target.
3564        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3565        loop.Bind();
3566      }
3567      break;
3568    case DONT_KNOW: {
3569      if (test_at_bottom) {
3570        // Continue is either the update expression or the test at the
3571        // bottom, no need to label the test at the top.
3572        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3573      } else if (node->next() == NULL) {
3574        // We are not recompiling the test at the bottom and there is no
3575        // update expression.
3576        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
3577        node->continue_target()->Bind();
3578      } else {
3579        // We are not recompiling the test at the bottom and there is an
3580        // update expression.
3581        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3582        loop.Bind();
3583      }
3584
3585      // Compile the test with the body as the true target and preferred
3586      // fall-through and with the break target as the false target.
3587      ControlDestination dest(&body, node->break_target(), true);
3588      LoadCondition(node->cond(), &dest, true);
3589
3590      if (dest.false_was_fall_through()) {
3591        // If we got the break target as fall-through, the test may have
3592        // been unconditionally false (if there are no jumps to the
3593        // body).
3594        if (!body.is_linked()) {
3595          DecrementLoopNesting();
3596          return;
3597        }
3598
3599        // Otherwise, jump around the body on the fall through and then
3600        // bind the body target.
3601        node->break_target()->Unuse();
3602        node->break_target()->Jump();
3603        body.Bind();
3604      }
3605      break;
3606    }
3607    case ALWAYS_FALSE:
3608      UNREACHABLE();
3609      break;
3610  }
3611
3612  CheckStack();  // TODO(1222600): ignore if body contains calls.
3613
3614  Visit(node->body());
3615
3616  // If there is an update expression, compile it if necessary.
3617  if (node->next() != NULL) {
3618    if (node->continue_target()->is_linked()) {
3619      node->continue_target()->Bind();
3620    }
3621
3622    // Control can reach the update by falling out of the body or by a
3623    // continue.
3624    if (has_valid_frame()) {
3625      // Record the source position of the statement, since this code,
3626      // which comes after the code for the body, actually belongs to
3627      // the loop statement and not to the body.
3628      CodeForStatementPosition(node);
3629      Visit(node->next());
3630    }
3631  }
3632
3633  // Based on the condition analysis, compile the backward jump as
3634  // necessary.
3635  switch (info) {
3636    case ALWAYS_TRUE:
3637      if (has_valid_frame()) {
3638        if (node->next() == NULL) {
3639          node->continue_target()->Jump();
3640        } else {
3641          loop.Jump();
3642        }
3643      }
3644      break;
3645    case DONT_KNOW:
3646      if (test_at_bottom) {
3647        if (node->continue_target()->is_linked()) {
3648          // We can have dangling jumps to the continue target if there
3649          // was no update expression.
3650          node->continue_target()->Bind();
3651        }
3652        // Control can reach the test at the bottom by falling out of
3653        // the body, by a continue in the body, or from the update
3654        // expression.
3655        if (has_valid_frame()) {
3656          // The break target is the fall-through (body is a backward
3657          // jump from here).
3658          ControlDestination dest(&body, node->break_target(), false);
3659          LoadCondition(node->cond(), &dest, true);
3660        }
3661      } else {
3662        // Otherwise, jump back to the test at the top.
3663        if (has_valid_frame()) {
3664          if (node->next() == NULL) {
3665            node->continue_target()->Jump();
3666          } else {
3667            loop.Jump();
3668          }
3669        }
3670      }
3671      break;
3672    case ALWAYS_FALSE:
3673      UNREACHABLE();
3674      break;
3675  }
3676
3677  // The break target may be already bound (by the condition), or there
3678  // may not be a valid frame.  Bind it only if needed.
3679  if (node->break_target()->is_linked()) {
3680    node->break_target()->Bind();
3681  }
3682  DecrementLoopNesting();
3683}
3684
3685
3686void CodeGenerator::VisitForInStatement(ForInStatement* node) {
3687  ASSERT(!in_spilled_code());
3688  VirtualFrame::SpilledScope spilled_scope;
3689  Comment cmnt(masm_, "[ ForInStatement");
3690  CodeForStatementPosition(node);
3691
3692  JumpTarget primitive;
3693  JumpTarget jsobject;
3694  JumpTarget fixed_array;
3695  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
3696  JumpTarget end_del_check;
3697  JumpTarget exit;
3698
3699  // Get the object to enumerate over (converted to JSObject).
3700  LoadAndSpill(node->enumerable());
3701
3702  // Both SpiderMonkey and kjs ignore null and undefined in contrast
3703  // to the specification.  Section 12.6.4 mandates a call to ToObject.
3704  frame_->EmitPop(rax);
3705
3706  // rax: value to be iterated over
3707  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
3708  exit.Branch(equal);
3709  __ CompareRoot(rax, Heap::kNullValueRootIndex);
3710  exit.Branch(equal);
3711
3712  // Stack layout in body:
3713  // [iteration counter (smi)] <- slot 0
3714  // [length of array]         <- slot 1
3715  // [FixedArray]              <- slot 2
3716  // [Map or 0]                <- slot 3
3717  // [Object]                  <- slot 4
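  // Illustrative example: while compiling "for (var key in obj) body",
  // the generated loop keeps these five values on the stack for its
  // whole duration.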
3718
3719  // Check if enumerable is already a JSObject
3720  // rax: value to be iterated over
3721  Condition is_smi = masm_->CheckSmi(rax);
3722  primitive.Branch(is_smi);
3723  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
3724  jsobject.Branch(above_equal);
3725
3726  primitive.Bind();
3727  frame_->EmitPush(rax);
3728  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
3729  // The function call returns the value in rax, which is where we want it below.
3730
3731  jsobject.Bind();
3732  // Get the set of properties (as a FixedArray or Map).
3733  // rax: value to be iterated over
3734  frame_->EmitPush(rax);  // Push the object being iterated over.
3735
3736
3737  // Check cache validity in generated code. This is a fast case for
3738  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
3739  // guarantee cache validity, call the runtime system to check cache
3740  // validity or get the property names in a fixed array.
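  // Illustrative example: a plain object whose map already carries an
  // enum cache and whose prototypes have no enumerable properties
  // passes the checks below, so no runtime call is needed to collect
  // its keys.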
3741  JumpTarget call_runtime;
3742  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
3743  JumpTarget check_prototype;
3744  JumpTarget use_cache;
3745  __ movq(rcx, rax);
3746  loop.Bind();
3747  // Check that there are no elements.
3748  __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
3749  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
3750  call_runtime.Branch(not_equal);
3751  // Check that instance descriptors are not empty so that we can
3752  // check for an enum cache.  Leave the map in rbx for the subsequent
3753  // prototype load.
3754  __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
3755  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
3756  __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
3757  call_runtime.Branch(equal);
3758  // Check that there is an enum cache in the non-empty instance
3759  // descriptors.  This is the case if the next enumeration index
3760  // field does not contain a smi.
3761  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
3762  is_smi = masm_->CheckSmi(rdx);
3763  call_runtime.Branch(is_smi);
3764  // For all objects but the receiver, check that the cache is empty.
3765  __ cmpq(rcx, rax);
3766  check_prototype.Branch(equal);
3767  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
3768  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
3769  call_runtime.Branch(not_equal);
3770  check_prototype.Bind();
3771  // Load the prototype from the map and loop if non-null.
3772  __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
3773  __ CompareRoot(rcx, Heap::kNullValueRootIndex);
3774  loop.Branch(not_equal);
3775  // The enum cache is valid.  Load the map of the object being
3776  // iterated over and use the cache for the iteration.
3777  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
3778  use_cache.Jump();
3779
3780  call_runtime.Bind();
3781  // Call the runtime to get the property names for the object.
3782  frame_->EmitPush(rax);  // push the Object (slot 4) for the runtime call
3783  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
3784
3785  // If we got a Map, we can do a fast modification check.
3786  // Otherwise, we got a FixedArray, and we have to do a slow check.
3787  // rax: map or fixed array (result from call to
3788  // Runtime::kGetPropertyNamesFast)
3789  __ movq(rdx, rax);
3790  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
3791  __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
3792  fixed_array.Branch(not_equal);
3793
3794  use_cache.Bind();
3795  // Get enum cache
3796  // rax: map (either the result from a call to
3797  // Runtime::kGetPropertyNamesFast or has been fetched directly from
3798  // the object)
3799  __ movq(rcx, rax);
3800  __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
3801  // Get the bridge array held in the enumeration index field.
3802  __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
3803  // Get the cache from the bridge array.
3804  __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
3805
3806  frame_->EmitPush(rax);  // <- slot 3
3807  frame_->EmitPush(rdx);  // <- slot 2
3808  __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
3809  frame_->EmitPush(rax);  // <- slot 1
3810  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
3811  entry.Jump();
3812
3813  fixed_array.Bind();
3814  // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
3815  frame_->EmitPush(Smi::FromInt(0));  // <- slot 3
3816  frame_->EmitPush(rax);  // <- slot 2
3817
3818  // Push the length of the array and the initial index onto the stack.
3819  __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
3820  frame_->EmitPush(rax);  // <- slot 1
3821  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
3822
3823  // Condition.
3824  entry.Bind();
3825  // Grab the current frame's height for the break and continue
3826  // targets only after all the state is pushed on the frame.
3827  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
3828  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
3829
3830  __ movq(rax, frame_->ElementAt(0));  // load the current count
3831  __ SmiCompare(frame_->ElementAt(1), rax);  // compare to the array length
3832  node->break_target()->Branch(below_equal);
3833
3834  // Get the i'th entry of the array.
3835  __ movq(rdx, frame_->ElementAt(2));
3836  SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
3837  __ movq(rbx,
3838          FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
3839
3840  // Get the expected map from the stack or a zero map in the
3841  // permanent slow case.
3842  // rax: current iteration count  rbx: i'th entry of the enum cache
3843  __ movq(rdx, frame_->ElementAt(3));
3844  // Check if the expected map still matches that of the enumerable.
3845  // If not, we have to filter the key.
3846  // rax: current iteration count
3847  // rbx: i'th entry of the enum cache
3848  // rdx: expected map value
3849  __ movq(rcx, frame_->ElementAt(4));
3850  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
3851  __ cmpq(rcx, rdx);
3852  end_del_check.Branch(equal);
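  // Illustrative example: if the body does "delete o.x" on the object
  // being enumerated, the map check above fails from then on and every
  // remaining key is re-validated through FILTER_KEY below.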
3853
3854  // Convert the entry to a string (or null if it isn't a property anymore).
3855  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
3856  frame_->EmitPush(rbx);  // push entry
3857  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
3858  __ movq(rbx, rax);
3859
3860  // If the property has been removed while iterating, we just skip it.
3861  __ Cmp(rbx, Smi::FromInt(0));
3862  node->continue_target()->Branch(equal);
3863
3864  end_del_check.Bind();
3865  // Store the entry in the 'each' expression and take another spin in the
3866  // loop.  rbx: i'th entry of the enum cache (or string thereof).
3867  frame_->EmitPush(rbx);
3868  { Reference each(this, node->each());
3869    // Loading a reference may leave the frame in an unspilled state.
3870    frame_->SpillAll();
3871    if (!each.is_illegal()) {
3872      if (each.size() > 0) {
3873        frame_->EmitPush(frame_->ElementAt(each.size()));
3874        each.SetValue(NOT_CONST_INIT);
3875        frame_->Drop(2);  // Drop the original and the copy of the element.
3876      } else {
3877        // If the reference has size zero then we can use the value below
3878        // the reference as if it were above the reference, instead of pushing
3879        // a new copy of it above the reference.
3880        each.SetValue(NOT_CONST_INIT);
3881        frame_->Drop();  // Drop the original of the element.
3882      }
3883    }
3884  }
3885  // Unloading a reference may leave the frame in an unspilled state.
3886  frame_->SpillAll();
3887
3888  // Body.
3889  CheckStack();  // TODO(1222600): ignore if body contains calls.
3890  VisitAndSpill(node->body());
3891
3892  // Next.  Reestablish a spilled frame in case we are coming here via
3893  // a continue in the body.
3894  node->continue_target()->Bind();
3895  frame_->SpillAll();
3896  frame_->EmitPop(rax);
3897  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
3898  frame_->EmitPush(rax);
3899  entry.Jump();
3900
3901  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
3902  // any frame.
3903  node->break_target()->Bind();
3904  frame_->Drop(5);
3905
3906  // Exit.
3907  exit.Bind();
3908
3909  node->continue_target()->Unuse();
3910  node->break_target()->Unuse();
3911}
3912
3913
3914void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
3915  ASSERT(!in_spilled_code());
3916  VirtualFrame::SpilledScope spilled_scope;
3917  Comment cmnt(masm_, "[ TryCatchStatement");
3918  CodeForStatementPosition(node);
3919
3920  JumpTarget try_block;
3921  JumpTarget exit;
3922
3923  try_block.Call();
3924  // --- Catch block ---
3925  frame_->EmitPush(rax);
3926
3927  // Store the caught exception in the catch variable.
3928  Variable* catch_var = node->catch_var()->var();
3929  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
3930  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
3931
3932  // Remove the exception from the stack.
3933  frame_->Drop();
3934
3935  VisitStatementsAndSpill(node->catch_block()->statements());
3936  if (has_valid_frame()) {
3937    exit.Jump();
3938  }
3939
3940
3941  // --- Try block ---
3942  try_block.Bind();
3943
3944  frame_->PushTryHandler(TRY_CATCH_HANDLER);
3945  int handler_height = frame_->height();
3946
3947  // Shadow the jump targets for all escapes from the try block, including
3948  // returns.  During shadowing, the original target is hidden as the
3949  // ShadowTarget and operations on the original actually affect the
3950  // shadowing target.
3951  //
3952  // We should probably try to unify the escaping targets and the return
3953  // target.
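  // Illustrative example: in "while (c) { try { break; } catch (e) {} }"
  // the break escapes the try block, so its target is shadowed here and
  // the handler is unlinked before the jump is finally taken.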
3954  int nof_escapes = node->escaping_targets()->length();
3955  List<ShadowTarget*> shadows(1 + nof_escapes);
3956
3957  // Add the shadow target for the function return.
3958  static const int kReturnShadowIndex = 0;
3959  shadows.Add(new ShadowTarget(&function_return_));
3960  bool function_return_was_shadowed = function_return_is_shadowed_;
3961  function_return_is_shadowed_ = true;
3962  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
3963
3964  // Add the remaining shadow targets.
3965  for (int i = 0; i < nof_escapes; i++) {
3966    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
3967  }
3968
3969  // Generate code for the statements in the try block.
3970  VisitStatementsAndSpill(node->try_block()->statements());
3971
3972  // Stop the introduced shadowing and count the number of required unlinks.
3973  // After shadowing stops, the original targets are unshadowed and the
3974  // ShadowTargets represent the formerly shadowing targets.
3975  bool has_unlinks = false;
3976  for (int i = 0; i < shadows.length(); i++) {
3977    shadows[i]->StopShadowing();
3978    has_unlinks = has_unlinks || shadows[i]->is_linked();
3979  }
3980  function_return_is_shadowed_ = function_return_was_shadowed;
3981
3982  // Get an external reference to the handler address.
3983  ExternalReference handler_address(Isolate::k_handler_address, isolate());
3984
3985  // Make sure that there's nothing left on the stack above the
3986  // handler structure.
3987  if (FLAG_debug_code) {
3988    __ movq(kScratchRegister, handler_address);
3989    __ cmpq(rsp, Operand(kScratchRegister, 0));
3990    __ Assert(equal, "stack pointer should point to top handler");
3991  }
3992
3993  // If we can fall off the end of the try block, unlink from try chain.
3994  if (has_valid_frame()) {
3995    // The next handler address is on top of the frame.  Unlink from
3996    // the handler list and drop the rest of this handler from the
3997    // frame.
3998    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3999    __ movq(kScratchRegister, handler_address);
4000    frame_->EmitPop(Operand(kScratchRegister, 0));
4001    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
4002    if (has_unlinks) {
4003      exit.Jump();
4004    }
4005  }
4006
4007  // Generate unlink code for the (formerly) shadowing targets that
4008  // have been jumped to.  Deallocate each shadow target.
4009  Result return_value;
4010  for (int i = 0; i < shadows.length(); i++) {
4011    if (shadows[i]->is_linked()) {
4012      // Unlink from try chain; be careful not to destroy the TOS if
4013      // there is one.
4014      if (i == kReturnShadowIndex) {
4015        shadows[i]->Bind(&return_value);
4016        return_value.ToRegister(rax);
4017      } else {
4018        shadows[i]->Bind();
4019      }
4020      // Because we can be jumping here (to spilled code) from
4021      // unspilled code, we need to reestablish a spilled frame at
4022      // this block.
4023      frame_->SpillAll();
4024
4025      // Reload sp from the top handler, because some statements that we
4026      // break from (e.g., for...in) may have left stuff on the stack.
4027      __ movq(kScratchRegister, handler_address);
4028      __ movq(rsp, Operand(kScratchRegister, 0));
4029      frame_->Forget(frame_->height() - handler_height);
4030
4031      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4032      __ movq(kScratchRegister, handler_address);
4033      frame_->EmitPop(Operand(kScratchRegister, 0));
4034      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
4035
4036      if (i == kReturnShadowIndex) {
4037        if (!function_return_is_shadowed_) frame_->PrepareForReturn();
4038        shadows[i]->other_target()->Jump(&return_value);
4039      } else {
4040        shadows[i]->other_target()->Jump();
4041      }
4042    }
4043  }
4044
4045  exit.Bind();
4046}
4047
4048
4049void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
4050  ASSERT(!in_spilled_code());
4051  VirtualFrame::SpilledScope spilled_scope;
4052  Comment cmnt(masm_, "[ TryFinallyStatement");
4053  CodeForStatementPosition(node);
4054
4055  // State: Used to keep track of the reason for entering the finally
4056  // block. Should probably be extended to hold information for
4057  // break/continue from within the try block.
4058  enum { FALLING, THROWING, JUMPING };
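  // Illustrative sketch: rcx carries the reason for entering the finally
  // block: FALLING when the try block completes normally, THROWING after
  // an exception, and JUMPING + i for the i'th escaping target (a
  // return, break, or continue out of the try block).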
4059
4060  JumpTarget try_block;
4061  JumpTarget finally_block;
4062
4063  try_block.Call();
4064
4065  frame_->EmitPush(rax);
4066  // In case of thrown exceptions, this is where we continue.
4067  __ Move(rcx, Smi::FromInt(THROWING));
4068  finally_block.Jump();
4069
4070  // --- Try block ---
4071  try_block.Bind();
4072
4073  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
4074  int handler_height = frame_->height();
4075
4076  // Shadow the jump targets for all escapes from the try block, including
4077  // returns.  During shadowing, the original target is hidden as the
4078  // ShadowTarget and operations on the original actually affect the
4079  // shadowing target.
4080  //
4081  // We should probably try to unify the escaping targets and the return
4082  // target.
4083  int nof_escapes = node->escaping_targets()->length();
4084  List<ShadowTarget*> shadows(1 + nof_escapes);
4085
4086  // Add the shadow target for the function return.
4087  static const int kReturnShadowIndex = 0;
4088  shadows.Add(new ShadowTarget(&function_return_));
4089  bool function_return_was_shadowed = function_return_is_shadowed_;
4090  function_return_is_shadowed_ = true;
4091  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
4092
4093  // Add the remaining shadow targets.
4094  for (int i = 0; i < nof_escapes; i++) {
4095    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
4096  }
4097
4098  // Generate code for the statements in the try block.
4099  VisitStatementsAndSpill(node->try_block()->statements());
4100
4101  // Stop the introduced shadowing and count the number of required unlinks.
4102  // After shadowing stops, the original targets are unshadowed and the
4103  // ShadowTargets represent the formerly shadowing targets.
4104  int nof_unlinks = 0;
4105  for (int i = 0; i < shadows.length(); i++) {
4106    shadows[i]->StopShadowing();
4107    if (shadows[i]->is_linked()) nof_unlinks++;
4108  }
4109  function_return_is_shadowed_ = function_return_was_shadowed;
4110
4111  // Get an external reference to the handler address.
4112  ExternalReference handler_address(Isolate::k_handler_address, isolate());
4113
4114  // If we can fall off the end of the try block, unlink from the try
4115  // chain and set the state on the frame to FALLING.
4116  if (has_valid_frame()) {
4117    // The next handler address is on top of the frame.
4118    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4119    __ movq(kScratchRegister, handler_address);
4120    frame_->EmitPop(Operand(kScratchRegister, 0));
4121    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
4122
4123    // Fake a top of stack value (unneeded when FALLING) and set the
4124    // state in rcx, then jump around the unlink blocks if any.
4125    frame_->EmitPush(Heap::kUndefinedValueRootIndex);
4126    __ Move(rcx, Smi::FromInt(FALLING));
4127    if (nof_unlinks > 0) {
4128      finally_block.Jump();
4129    }
4130  }
4131
4132  // Generate code to unlink and set the state for the (formerly)
4133  // shadowing targets that have been jumped to.
4134  for (int i = 0; i < shadows.length(); i++) {
4135    if (shadows[i]->is_linked()) {
4136      // If we have come from the shadowed return, the return value is
4137      // on the virtual frame.  We must preserve it until it is
4138      // pushed.
4139      if (i == kReturnShadowIndex) {
4140        Result return_value;
4141        shadows[i]->Bind(&return_value);
4142        return_value.ToRegister(rax);
4143      } else {
4144        shadows[i]->Bind();
4145      }
4146      // Because we can be jumping here (to spilled code) from
4147      // unspilled code, we need to reestablish a spilled frame at
4148      // this block.
4149      frame_->SpillAll();
4150
4151      // Reload sp from the top handler, because some statements that
4152      // we break from (e.g., for...in) may have left stuff on the
4153      // stack.
4154      __ movq(kScratchRegister, handler_address);
4155      __ movq(rsp, Operand(kScratchRegister, 0));
4156      frame_->Forget(frame_->height() - handler_height);
4157
4158      // Unlink this handler and drop it from the frame.
4159      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4160      __ movq(kScratchRegister, handler_address);
4161      frame_->EmitPop(Operand(kScratchRegister, 0));
4162      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
4163
4164      if (i == kReturnShadowIndex) {
4165        // If this target shadowed the function return, materialize
4166        // the return value on the stack.
4167        frame_->EmitPush(rax);
4168      } else {
4169        // Fake TOS for targets that shadowed breaks and continues.
4170        frame_->EmitPush(Heap::kUndefinedValueRootIndex);
4171      }
4172      __ Move(rcx, Smi::FromInt(JUMPING + i));
4173      if (--nof_unlinks > 0) {
4174        // If this is not the last unlink block, jump around the next.
4175        finally_block.Jump();
4176      }
4177    }
4178  }
4179
4180  // --- Finally block ---
4181  finally_block.Bind();
4182
4183  // Push the state on the stack.
4184  frame_->EmitPush(rcx);
4185
4186  // We keep two elements on the stack - the (possibly faked) result
4187  // and the state - while evaluating the finally block.
4188  //
4189  // Generate code for the statements in the finally block.
4190  VisitStatementsAndSpill(node->finally_block()->statements());
4191
4192  if (has_valid_frame()) {
4193    // Restore state and return value or faked TOS.
4194    frame_->EmitPop(rcx);
4195    frame_->EmitPop(rax);
4196  }
4197
4198  // Generate code to jump to the right destination for all used
4199  // formerly shadowing targets.  Deallocate each shadow target.
4200  for (int i = 0; i < shadows.length(); i++) {
4201    if (has_valid_frame() && shadows[i]->is_bound()) {
4202      BreakTarget* original = shadows[i]->other_target();
4203      __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
4204      if (i == kReturnShadowIndex) {
4205        // The return value is (already) in rax.
4206        Result return_value = allocator_->Allocate(rax);
4207        ASSERT(return_value.is_valid());
4208        if (function_return_is_shadowed_) {
4209          original->Branch(equal, &return_value);
4210        } else {
4211          // Branch around the preparation for return, which may emit
4212          // code.
4213          JumpTarget skip;
4214          skip.Branch(not_equal);
4215          frame_->PrepareForReturn();
4216          original->Jump(&return_value);
4217          skip.Bind();
4218        }
4219      } else {
4220        original->Branch(equal);
4221      }
4222    }
4223  }
4224
4225  if (has_valid_frame()) {
4226    // Check if we need to rethrow the exception.
4227    JumpTarget exit;
4228    __ SmiCompare(rcx, Smi::FromInt(THROWING));
4229    exit.Branch(not_equal);
4230
4231    // Rethrow exception.
4232    frame_->EmitPush(rax);  // undo pop from above
4233    frame_->CallRuntime(Runtime::kReThrow, 1);
4234
4235    // Done.
4236    exit.Bind();
4237  }
4238}
4239
4240
4241void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
4242  ASSERT(!in_spilled_code());
4243  Comment cmnt(masm_, "[ DebuggerStatement");
4244  CodeForStatementPosition(node);
4245#ifdef ENABLE_DEBUGGER_SUPPORT
4246  // Spill everything, even constants, to the frame.
4247  frame_->SpillAll();
4248
4249  frame_->DebugBreak();
4250  // Ignore the return value.
4251#endif
4252}
4253
4254
4255void CodeGenerator::InstantiateFunction(
4256    Handle<SharedFunctionInfo> function_info,
4257    bool pretenure) {
4258  // The inevitable call will sync frame elements to memory anyway, so
4259  // we do it eagerly to allow us to push the arguments directly into
4260  // place.
4261  frame_->SyncRange(0, frame_->element_count() - 1);
4262
4263  // Use the fast case closure allocation code that allocates in new
4264  // space for nested functions that don't need literals cloning.
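  // Illustrative example: a nested "function inner(a) { return a; }"
  // with no literals is allocated in new space by FastNewClosureStub
  // below; pretenured functions and functions with literals take the
  // runtime path instead.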
4265  if (!pretenure &&
4266      scope()->is_function_scope() &&
4267      function_info->num_literals() == 0) {
4268    FastNewClosureStub stub(
4269        function_info->strict_mode() ? kStrictMode : kNonStrictMode);
4270    frame_->Push(function_info);
4271    Result answer = frame_->CallStub(&stub, 1);
4272    frame_->Push(&answer);
4273  } else {
4274    // Call the runtime to instantiate the function based on the
4275    // shared function info.
4276    frame_->EmitPush(rsi);
4277    frame_->EmitPush(function_info);
4278    frame_->EmitPush(pretenure
4279                     ? FACTORY->true_value()
4280                     : FACTORY->false_value());
4281    Result result = frame_->CallRuntime(Runtime::kNewClosure, 3);
4282    frame_->Push(&result);
4283  }
4284}
4285
4286
4287void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
4288  Comment cmnt(masm_, "[ FunctionLiteral");
4289
4290  // Build the function info and instantiate it.
4291  Handle<SharedFunctionInfo> function_info =
4292      Compiler::BuildFunctionInfo(node, script());
4293  // Check for stack-overflow exception.
4294  if (function_info.is_null()) {
4295    SetStackOverflow();
4296    return;
4297  }
4298  InstantiateFunction(function_info, node->pretenure());
4299}
4300
4301
4302void CodeGenerator::VisitSharedFunctionInfoLiteral(
4303    SharedFunctionInfoLiteral* node) {
4304  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
4305  InstantiateFunction(node->shared_function_info(), false);
4306}
4307
4308
4309void CodeGenerator::VisitConditional(Conditional* node) {
4310  Comment cmnt(masm_, "[ Conditional");
4311  JumpTarget then;
4312  JumpTarget else_;
4313  JumpTarget exit;
4314  ControlDestination dest(&then, &else_, true);
4315  LoadCondition(node->condition(), &dest, true);
4316
4317  if (dest.false_was_fall_through()) {
4318    // The else target was bound, so we compile the else part first.
4319    Load(node->else_expression());
4320
4321    if (then.is_linked()) {
4322      exit.Jump();
4323      then.Bind();
4324      Load(node->then_expression());
4325    }
4326  } else {
4327    // The then target was bound, so we compile the then part first.
4328    Load(node->then_expression());
4329
4330    if (else_.is_linked()) {
4331      exit.Jump();
4332      else_.Bind();
4333      Load(node->else_expression());
4334    }
4335  }
4336
4337  exit.Bind();
4338}
4339
4340
4341void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
4342  if (slot->type() == Slot::LOOKUP) {
4343    ASSERT(slot->var()->is_dynamic());
4344
4345    JumpTarget slow;
4346    JumpTarget done;
4347    Result value;
4348
4349    // Generate fast case for loading from slots that correspond to
4350    // local/global variables or arguments unless they are shadowed by
4351    // eval-introduced bindings.
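    // Illustrative example: in "function f(s) { eval(s); return x; }"
    // the slot for 'x' is a LOOKUP slot because the eval may introduce
    // a shadowing 'x'; the fast case covers the common non-shadowed
    // load and otherwise falls through to the runtime call below.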
4352    EmitDynamicLoadFromSlotFastCase(slot,
4353                                    typeof_state,
4354                                    &value,
4355                                    &slow,
4356                                    &done);
4357
4358    slow.Bind();
4359    // A runtime call is inevitable.  We eagerly sync frame elements
4360    // to memory so that we can push the arguments directly into place
4361    // on top of the frame.
4362    frame_->SyncRange(0, frame_->element_count() - 1);
4363    frame_->EmitPush(rsi);
4364    __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
4365    frame_->EmitPush(kScratchRegister);
4366    if (typeof_state == INSIDE_TYPEOF) {
4367       value =
4368         frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
4369    } else {
4370       value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
4371    }
4372
4373    done.Bind(&value);
4374    frame_->Push(&value);
4375
4376  } else if (slot->var()->mode() == Variable::CONST) {
4377    // Const slots may contain 'the hole' value (the constant hasn't been
4378  // initialized yet), which needs to be converted into the 'undefined'
4379    // value.
4380    //
4381    // We currently spill the virtual frame because constants use the
4382    // potentially unsafe direct-frame access of SlotOperand.
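    // Illustrative example: under the old function-scoped const
    // semantics, in "f(); const k = 1; function f() { return k; }" the
    // read of 'k' inside 'f' runs before the initializer, finds the
    // hole, and yields undefined.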
4383    VirtualFrame::SpilledScope spilled_scope;
4384    Comment cmnt(masm_, "[ Load const");
4385    JumpTarget exit;
4386    __ movq(rcx, SlotOperand(slot, rcx));
4387    __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4388    exit.Branch(not_equal);
4389    __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
4390    exit.Bind();
4391    frame_->EmitPush(rcx);
4392
4393  } else if (slot->type() == Slot::PARAMETER) {
4394    frame_->PushParameterAt(slot->index());
4395
4396  } else if (slot->type() == Slot::LOCAL) {
4397    frame_->PushLocalAt(slot->index());
4398
4399  } else {
4400    // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
4401    // here.
4402    //
4403    // The use of SlotOperand below is safe for an unspilled frame
4404    // because it will always be a context slot.
4405    ASSERT(slot->type() == Slot::CONTEXT);
4406    Result temp = allocator_->Allocate();
4407    ASSERT(temp.is_valid());
4408    __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
4409    frame_->Push(&temp);
4410  }
4411}
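
// In the CONST case above, an uninitialized const slot holds 'the hole'
// and must read as 'undefined'.  A self-contained sketch of that rule,
// using an illustrative sentinel type rather than V8's tagged values:
namespace const_load_example {
enum SlotContents { kTheHole, kUndefined, kInitialized };
static inline SlotContents ReadConstSlot(SlotContents raw) {
  // Mirrors the CompareRoot/LoadRoot sequence: convert the hole and
  // pass every other value through unchanged.
  return (raw == kTheHole) ? kUndefined : raw;
}
}  // namespace const_load_example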
4412
4413
4414void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
4415                                                  TypeofState state) {
4416  LoadFromSlot(slot, state);
4417
4418  // Bail out quickly if we're not using lazy arguments allocation.
4419  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
4420
4421  // ... or if the slot is a parameter slot or not an arguments slot.
4422  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
4423
4424  // Pop the loaded value from the stack.
4425  Result value = frame_->Pop();
4426
4427  // If the loaded value is a constant, we know statically whether the
4428  // arguments object has been lazily allocated yet.
4429  if (value.is_constant()) {
4430    if (value.handle()->IsArgumentsMarker()) {
4431      Result arguments = StoreArgumentsObject(false);
4432      frame_->Push(&arguments);
4433    } else {
4434      frame_->Push(&value);
4435    }
4436    return;
4437  }
4438
4439  // The loaded value is in a register. If it is the sentinel that
4440  // indicates that we haven't loaded the arguments object yet, we
4441  // need to do it now.
4442  JumpTarget exit;
4443  __ CompareRoot(value.reg(), Heap::kArgumentsMarkerRootIndex);
4444  frame_->Push(&value);
4445  exit.Branch(not_equal);
4446  Result arguments = StoreArgumentsObject(false);
4447  frame_->SetElementAt(0, &arguments);
4448  exit.Bind();
4449}
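
// The sentinel test above implements lazy arguments allocation: the slot
// initially holds the arguments marker and the real arguments object is
// only built on first use.  A sketch with stand-in types (not V8's):
namespace lazy_arguments_example {
struct Value { bool is_marker; };
static inline Value LoadArguments(Value slot, Value (*allocate)()) {
  // Equivalent of the CompareRoot/Branch/StoreArgumentsObject sequence.
  return slot.is_marker ? allocate() : slot;
}
}  // namespace lazy_arguments_example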
4450
4451
4452Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
4453    Slot* slot,
4454    TypeofState typeof_state,
4455    JumpTarget* slow) {
4456  // Check that no extension objects have been created by calls to
4457  // eval from the current scope to the global scope.
4458  Register context = rsi;
4459  Result tmp = allocator_->Allocate();
4460  ASSERT(tmp.is_valid());  // All non-reserved registers were available.
4461
4462  Scope* s = scope();
4463  while (s != NULL) {
4464    if (s->num_heap_slots() > 0) {
4465      if (s->calls_eval()) {
4466        // Check that extension is NULL.
4467        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4468                Immediate(0));
4469        slow->Branch(not_equal, not_taken);
4470      }
4471      // Load next context in chain.
4472      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4473      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4474      context = tmp.reg();
4475    }
4476    // If no outer scope calls eval, we do not need to check more
4477    // context extensions.  If we have reached an eval scope, we check
4478    // all extensions from this point.
4479    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
4480    s = s->outer_scope();
4481  }
4482
4483  if (s->is_eval_scope()) {
4484    // Walk up the context chain.  There is no frame effect so it is
4485    // safe to use raw labels here.
4486    Label next, fast;
4487    if (!context.is(tmp.reg())) {
4488      __ movq(tmp.reg(), context);
4489    }
4490    // Load map for comparison into register, outside loop.
4491    __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
4492    __ bind(&next);
4493    // Terminate at global context.
4494    __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
4495    __ j(equal, &fast);
4496    // Check that extension is NULL.
4497    __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
4498    slow->Branch(not_equal);
4499    // Load next context in chain.
4500    __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
4501    __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4502    __ jmp(&next);
4503    __ bind(&fast);
4504  }
4505  tmp.Unuse();
4506
4507  // All extension objects were empty and it is safe to use a global
4508  // load IC call.
4509  LoadGlobal();
4510  frame_->Push(slot->var()->name());
4511  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
4512                         ? RelocInfo::CODE_TARGET
4513                         : RelocInfo::CODE_TARGET_CONTEXT;
4514  Result answer = frame_->CallLoadIC(mode);
4515  // A test rax instruction following the call signals that the inobject
4516  // property case was inlined.  Ensure that there is not a test rax
4517  // instruction here.
4518  masm_->nop();
4519  return answer;
4520}
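
// The two loops above check every context between the current one and
// the global context for an extension object (bindings introduced by
// eval).  A self-contained sketch of that walk, using a stand-in Context
// that is not V8's actual layout:
namespace context_check_example {
struct Context {
  const Context* previous;  // enclosing context in the chain
  const void* extension;    // non-NULL if eval introduced bindings here
  bool is_global;
};
// Returns true when the fast global load is safe, i.e. no intervening
// context carries an extension object.
static inline bool NoExtensionObjects(const Context* current) {
  for (const Context* c = current; !c->is_global; c = c->previous) {
    if (c->extension != NULL) return false;
  }
  return true;
}
}  // namespace context_check_example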
4521
4522
4523void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
4524                                                    TypeofState typeof_state,
4525                                                    Result* result,
4526                                                    JumpTarget* slow,
4527                                                    JumpTarget* done) {
4528  // Generate fast-case code for variables that might be shadowed by
4529  // eval-introduced variables.  Eval is used a lot without
4530  // introducing variables.  In those cases, we do not want to
4531  // perform a runtime call for all variables in the scope
4532  // containing the eval.
4533  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
4534    *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
4535    done->Jump(result);
4536
4537  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
4538    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
4539    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
4540    if (potential_slot != NULL) {
4541      // Generate fast case for locals that rewrite to slots.
4542      // Allocate a fresh register to use as a temp in
4543      // ContextSlotOperandCheckExtensions and to hold the result
4544      // value.
4545      *result = allocator_->Allocate();
4546      ASSERT(result->is_valid());
4547      __ movq(result->reg(),
4548              ContextSlotOperandCheckExtensions(potential_slot,
4549                                                *result,
4550                                                slow));
4551      if (potential_slot->var()->mode() == Variable::CONST) {
4552        __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
4553        done->Branch(not_equal, result);
4554        __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
4555      }
4556      done->Jump(result);
4557    } else if (rewrite != NULL) {
4558      // Generate fast case for argument loads.
4559      Property* property = rewrite->AsProperty();
4560      if (property != NULL) {
4561        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
4562        Literal* key_literal = property->key()->AsLiteral();
4563        if (obj_proxy != NULL &&
4564            key_literal != NULL &&
4565            obj_proxy->IsArguments() &&
4566            key_literal->handle()->IsSmi()) {
4567          // Load arguments object if there are no eval-introduced
4568          // variables. Then load the argument from the arguments
4569          // object using keyed load.
4570          Result arguments = allocator()->Allocate();
4571          ASSERT(arguments.is_valid());
4572          __ movq(arguments.reg(),
4573                  ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
4574                                                    arguments,
4575                                                    slow));
4576          frame_->Push(&arguments);
4577          frame_->Push(key_literal->handle());
4578          *result = EmitKeyedLoad();
4579          done->Jump(result);
4580        }
4581      }
4582    }
4583  }
4584}
4585
4586
4587void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
4588  if (slot->type() == Slot::LOOKUP) {
4589    ASSERT(slot->var()->is_dynamic());
4590
4591    // For now, just do a runtime call.  Since the call is inevitable,
4592    // we eagerly sync the virtual frame so we can directly push the
4593    // arguments into place.
4594    frame_->SyncRange(0, frame_->element_count() - 1);
4595
4596    frame_->EmitPush(rsi);
4597    frame_->EmitPush(slot->var()->name());
4598
4599    Result value;
4600    if (init_state == CONST_INIT) {
4601      // Same as the case for a normal store, but ignores attribute
4602      // (e.g. READ_ONLY) of context slot so that we can initialize const
4603      // properties (introduced via eval("const foo = (some expr);")). Also,
4604      // uses the current function context instead of the top context.
4605      //
4606      // Note that we must declare the variable upon entry of eval(), via a
4607      // context slot declaration, but we cannot initialize it at the same
4608      // time, because the const declaration may be at the end of the eval
4609      // code (sigh...) and the const variable may have been used before
4610      // (where its value is 'undefined'). Thus, we can only do the
4611      // initialization when we actually encounter the expression and when
4612      // the expression operands are defined and valid, and thus we need the
4613      // split into 2 operations: declaration of the context slot followed
4614      // by initialization.
4615      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
4616    } else {
4617      frame_->Push(Smi::FromInt(strict_mode_flag()));
4618      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
4619    }
4620    // Storing a variable must keep the (new) value on the expression
4621    // stack. This is necessary for compiling chained assignment
4622    // expressions.
4623    frame_->Push(&value);
4624  } else {
4625    ASSERT(!slot->var()->is_dynamic());
4626
4627    JumpTarget exit;
4628    if (init_state == CONST_INIT) {
4629      ASSERT(slot->var()->mode() == Variable::CONST);
4630      // Only the first const initialization must be executed (the slot
4631      // still contains 'the hole' value). When the assignment is executed,
4632      // the code is identical to a normal store (see below).
4633      //
4634      // We spill the frame in the code below because the direct-frame
4635      // access of SlotOperand is potentially unsafe with an unspilled
4636      // frame.
4637      VirtualFrame::SpilledScope spilled_scope;
4638      Comment cmnt(masm_, "[ Init const");
4639      __ movq(rcx, SlotOperand(slot, rcx));
4640      __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4641      exit.Branch(not_equal);
4642    }
4643
4644    // We must execute the store.  Storing a variable must keep the (new)
4645    // value on the stack. This is necessary for compiling assignment
4646    // expressions.
4647    //
4648    // Note: We will reach here even with slot->var()->mode() ==
4649    // Variable::CONST because of const declarations which will initialize
4650    // consts to 'the hole' value and by doing so, end up calling this code.
4651    if (slot->type() == Slot::PARAMETER) {
4652      frame_->StoreToParameterAt(slot->index());
4653    } else if (slot->type() == Slot::LOCAL) {
4654      frame_->StoreToLocalAt(slot->index());
4655    } else {
4656      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
4657      //
4658      // The use of SlotOperand below is safe for an unspilled frame
4659      // because the slot is a context slot.
4660      ASSERT(slot->type() == Slot::CONTEXT);
4661      frame_->Dup();
4662      Result value = frame_->Pop();
4663      value.ToRegister();
4664      Result start = allocator_->Allocate();
4665      ASSERT(start.is_valid());
4666      __ movq(SlotOperand(slot, start.reg()), value.reg());
4667      // RecordWrite may destroy the value registers.
4668      //
4669      // TODO(204): Avoid actually spilling when the value is not
4670      // needed (probably the common case).
4671      frame_->Spill(value.reg());
4672      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
4673      Result temp = allocator_->Allocate();
4674      ASSERT(temp.is_valid());
4675      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
4676      // The start, value, and temp results are released when they go
4677      // out of scope.
4678    }
4679
4680    exit.Bind();
4681  }
4682}
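
// RecordWrite above tells the garbage collector that a pointer field of
// the context object changed.  A minimal card-marking sketch of what such
// a barrier records; V8's actual barrier differs in detail:
namespace write_barrier_example {
static const int kCardShift = 9;  // assumed 512-byte cards
struct CardTable { unsigned char marks[1 << 10]; };
static inline void MarkCard(CardTable* table, unsigned long slot_address) {
  // Mark the card containing the updated slot so the collector rescans it.
  table->marks[(slot_address >> kCardShift) % (1 << 10)] = 1;
}
}  // namespace write_barrier_example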
4683
4684
4685void CodeGenerator::VisitSlot(Slot* node) {
4686  Comment cmnt(masm_, "[ Slot");
4687  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
4688}
4689
4690
4691void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
4692  Comment cmnt(masm_, "[ VariableProxy");
4693  Variable* var = node->var();
4694  Expression* expr = var->rewrite();
4695  if (expr != NULL) {
4696    Visit(expr);
4697  } else {
4698    ASSERT(var->is_global());
4699    Reference ref(this, node);
4700    ref.GetValue();
4701  }
4702}
4703
4704
4705void CodeGenerator::VisitLiteral(Literal* node) {
4706  Comment cmnt(masm_, "[ Literal");
4707  frame_->Push(node->handle());
4708}
4709
4710
4711void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
4712  UNIMPLEMENTED();
4713  // TODO(X64): Implement security policy for loads of smis.
4714}
4715
4716
4717bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
4718  return false;
4719}
4720
4721
4722// Materialize the regexp literal 'node' in the literals array
4723// 'literals' of the function.  Leave the regexp boilerplate in
4724// 'boilerplate'.
4725class DeferredRegExpLiteral: public DeferredCode {
4726 public:
4727  DeferredRegExpLiteral(Register boilerplate,
4728                        Register literals,
4729                        RegExpLiteral* node)
4730      : boilerplate_(boilerplate), literals_(literals), node_(node) {
4731    set_comment("[ DeferredRegExpLiteral");
4732  }
4733
4734  void Generate();
4735
4736 private:
4737  Register boilerplate_;
4738  Register literals_;
4739  RegExpLiteral* node_;
4740};
4741
4742
4743void DeferredRegExpLiteral::Generate() {
4744  // Since the entry is undefined we call the runtime system to
4745  // compute the literal.
4746  // Literal array (0).
4747  __ push(literals_);
4748  // Literal index (1).
4749  __ Push(Smi::FromInt(node_->literal_index()));
4750  // RegExp pattern (2).
4751  __ Push(node_->pattern());
4752  // RegExp flags (3).
4753  __ Push(node_->flags());
4754  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
4755  if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
4756}
4757
4758
4759class DeferredAllocateInNewSpace: public DeferredCode {
4760 public:
4761  DeferredAllocateInNewSpace(int size,
4762                             Register target,
4763                             int registers_to_save = 0)
4764      : size_(size), target_(target), registers_to_save_(registers_to_save) {
4765    ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
4766    set_comment("[ DeferredAllocateInNewSpace");
4767  }
4768  void Generate();
4769
4770 private:
4771  int size_;
4772  Register target_;
4773  int registers_to_save_;
4774};
4775
4776
4777void DeferredAllocateInNewSpace::Generate() {
4778  for (int i = 0; i < kNumRegs; i++) {
4779    if (registers_to_save_ & (1 << i)) {
4780      Register save_register = { i };
4781      __ push(save_register);
4782    }
4783  }
4784  __ Push(Smi::FromInt(size_));
4785  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
4786  if (!target_.is(rax)) {
4787    __ movq(target_, rax);
4788  }
4789  for (int i = kNumRegs - 1; i >= 0; i--) {
4790    if (registers_to_save_ & (1 << i)) {
4791      Register save_register = { i };
4792      __ pop(save_register);
4793    }
4794  }
4795}
4796
4797
4798void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
4799  Comment cmnt(masm_, "[ RegExp Literal");
4800
4801  // Retrieve the literals array and check the allocated entry.  Begin
4802  // with a writable copy of the function of this activation in a
4803  // register.
4804  frame_->PushFunction();
4805  Result literals = frame_->Pop();
4806  literals.ToRegister();
4807  frame_->Spill(literals.reg());
4808
4809  // Load the literals array of the function.
4810  __ movq(literals.reg(),
4811          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
4812
4813  // Load the literal at the index saved in the AST node.
4814  Result boilerplate = allocator_->Allocate();
4815  ASSERT(boilerplate.is_valid());
4816  int literal_offset =
4817      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
4818  __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
4819
4820  // Check whether we need to materialize the RegExp object.  If so,
4821  // jump to the deferred code passing the literals array.
4822  DeferredRegExpLiteral* deferred =
4823      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
4824  __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
4825  deferred->Branch(equal);
4826  deferred->BindExit();
4827
4828  // The boilerplate register now contains the RegExp object.
4829
4830  Result tmp = allocator()->Allocate();
4831  ASSERT(tmp.is_valid());
4832
4833  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
4834
4835  DeferredAllocateInNewSpace* allocate_fallback =
4836      new DeferredAllocateInNewSpace(size, literals.reg());
4837  frame_->Push(&boilerplate);
4838  frame_->SpillTop();
4839  __ AllocateInNewSpace(size,
4840                        literals.reg(),
4841                        tmp.reg(),
4842                        no_reg,
4843                        allocate_fallback->entry_label(),
4844                        TAG_OBJECT);
4845  allocate_fallback->BindExit();
4846  boilerplate = frame_->Pop();
4847
4848  // Copy from boilerplate to clone and return the clone.
4849  for (int i = 0; i < size; i += kPointerSize) {
4850    __ movq(tmp.reg(), FieldOperand(boilerplate.reg(), i));
4851    __ movq(FieldOperand(literals.reg(), i), tmp.reg());
4852  }
4853  frame_->Push(&literals);
4854}
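
// The copy loop above clones the boilerplate one word at a time.  A plain
// sketch of that shallow, field-by-field copy (illustration only):
namespace regexp_clone_example {
static inline void ShallowCopyWords(void** dst, void* const* src,
                                    int size_in_words) {
  // Pointer fields end up shared between boilerplate and clone, which is
  // exactly what a shallow clone requires.
  for (int i = 0; i < size_in_words; i++) dst[i] = src[i];
}
}  // namespace regexp_clone_example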
4855
4856
4857void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
4858  Comment cmnt(masm_, "[ ObjectLiteral");
4859
4860  // Load a writable copy of the function of this activation in a
4861  // register.
4862  frame_->PushFunction();
4863  Result literals = frame_->Pop();
4864  literals.ToRegister();
4865  frame_->Spill(literals.reg());
4866
4867  // Load the literals array of the function.
4868  __ movq(literals.reg(),
4869          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
4870  // Literal array.
4871  frame_->Push(&literals);
4872  // Literal index.
4873  frame_->Push(Smi::FromInt(node->literal_index()));
4874  // Constant properties.
4875  frame_->Push(node->constant_properties());
4876  // Should the object literal have fast elements?
4877  frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
4878  Result clone;
4879  if (node->depth() > 1) {
4880    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
4881  } else {
4882    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
4883  }
4884  frame_->Push(&clone);
4885
4886  // Mark all computed expressions that are bound to a key that
4887  // is shadowed by a later occurrence of the same key. For the
4888  // marked expressions, no store code is emitted.
4889  node->CalculateEmitStore();
4890
4891  for (int i = 0; i < node->properties()->length(); i++) {
4892    ObjectLiteral::Property* property = node->properties()->at(i);
4893    switch (property->kind()) {
4894      case ObjectLiteral::Property::CONSTANT:
4895        break;
4896      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
4897        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
4898        // else fall through.
4899      case ObjectLiteral::Property::COMPUTED: {
4900        Handle<Object> key(property->key()->handle());
4901        if (key->IsSymbol()) {
4902          // Duplicate the object as the IC receiver.
4903          frame_->Dup();
4904          Load(property->value());
4905          if (property->emit_store()) {
4906            Result ignored =
4907                frame_->CallStoreIC(Handle<String>::cast(key), false,
4908                                    strict_mode_flag());
4909            // A test rax instruction following the store IC call would
4910            // indicate the presence of an inlined version of the
4911            // store. Add a nop to indicate that there is no such
4912            // inlined version.
4913            __ nop();
4914          } else {
4915            frame_->Drop(2);
4916          }
4917          break;
4918        }
4919        // Fall through
4920      }
4921      case ObjectLiteral::Property::PROTOTYPE: {
4922        // Duplicate the object as an argument to the runtime call.
4923        frame_->Dup();
4924        Load(property->key());
4925        Load(property->value());
4926        if (property->emit_store()) {
4927          frame_->Push(Smi::FromInt(NONE));   // PropertyAttributes
4928          // Ignore the result.
4929          Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
4930        } else {
4931          frame_->Drop(3);
4932        }
4933        break;
4934      }
4935      case ObjectLiteral::Property::SETTER: {
4936        // Duplicate the object as an argument to the runtime call.
4937        frame_->Dup();
4938        Load(property->key());
4939        frame_->Push(Smi::FromInt(1));
4940        Load(property->value());
4941        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
4942        // Ignore the result.
4943        break;
4944      }
4945      case ObjectLiteral::Property::GETTER: {
4946        // Duplicate the object as an argument to the runtime call.
4947        frame_->Dup();
4948        Load(property->key());
4949        frame_->Push(Smi::FromInt(0));
4950        Load(property->value());
4951        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
4952        // Ignore the result.
4953        break;
4954      }
4955      default: UNREACHABLE();
4956    }
4957  }
4958}
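
// CalculateEmitStore() above suppresses stores for keys shadowed by a
// later duplicate, so { x: 1, x: 2 } emits only the second store.  A
// scalar sketch of that last-write-wins rule (illustration only):
namespace object_literal_example {
static inline int FinalPropertyValue(int first_store, int second_store) {
  (void) first_store;   // dead store; no code is emitted for it
  return second_store;  // only the last occurrence is stored
}
}  // namespace object_literal_example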
4959
4960
4961void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
4962  Comment cmnt(masm_, "[ ArrayLiteral");
4963
4964  // Load a writable copy of the function of this activation in a
4965  // register.
4966  frame_->PushFunction();
4967  Result literals = frame_->Pop();
4968  literals.ToRegister();
4969  frame_->Spill(literals.reg());
4970
4971  // Load the literals array of the function.
4972  __ movq(literals.reg(),
4973          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
4974
4975  frame_->Push(&literals);
4976  frame_->Push(Smi::FromInt(node->literal_index()));
4977  frame_->Push(node->constant_elements());
4978  int length = node->values()->length();
4979  Result clone;
4980  if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
4981    FastCloneShallowArrayStub stub(
4982        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
4983    clone = frame_->CallStub(&stub, 3);
4984    Counters* counters = masm()->isolate()->counters();
4985    __ IncrementCounter(counters->cow_arrays_created_stub(), 1);
4986  } else if (node->depth() > 1) {
4987    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
4988  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4989    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
4990  } else {
4991    FastCloneShallowArrayStub stub(
4992        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
4993    clone = frame_->CallStub(&stub, 3);
4994  }
4995  frame_->Push(&clone);
4996
4997  // Generate code to set the elements in the array that are not
4998  // literals.
4999  for (int i = 0; i < length; i++) {
5000    Expression* value = node->values()->at(i);
5001
5002    if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
5003      continue;
5004    }
5005
5006    // The property must be set by generated code.
5007    Load(value);
5008
5009    // Get the property value off the stack.
5010    Result prop_value = frame_->Pop();
5011    prop_value.ToRegister();
5012
5013    // Fetch the array literal while leaving a copy on the stack and
5014    // use it to get the elements array.
5015    frame_->Dup();
5016    Result elements = frame_->Pop();
5017    elements.ToRegister();
5018    frame_->Spill(elements.reg());
5019    // Get the elements FixedArray.
5020    __ movq(elements.reg(),
5021            FieldOperand(elements.reg(), JSObject::kElementsOffset));
5022
5023    // Write to the indexed properties array.
5024    int offset = i * kPointerSize + FixedArray::kHeaderSize;
5025    __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
5026
5027    // Update the write barrier for the array address.
5028    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
5029    Result scratch = allocator_->Allocate();
5030    ASSERT(scratch.is_valid());
5031    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
5032  }
5033}
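
// The if-chain above picks one of four cloning strategies for the array
// literal.  A sketch of the same dispatch as a plain function; the
// enumerator names are illustrative:
namespace array_clone_example {
enum Strategy { kCopyOnWrite, kRuntimeDeep, kRuntimeShallow, kStubShallow };
static inline Strategy ChooseStrategy(bool has_cow_map, int depth,
                                      int length, int max_stub_length) {
  if (has_cow_map) return kCopyOnWrite;  // share the COW elements array
  if (depth > 1) return kRuntimeDeep;    // nested literals need the runtime
  if (length > max_stub_length) return kRuntimeShallow;
  return kStubShallow;                   // FastCloneShallowArrayStub
}
}  // namespace array_clone_example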
5034
5035
5036void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
5037  ASSERT(!in_spilled_code());
5038  // Call runtime routine to allocate the catch extension object and
5039  // assign the exception value to the catch variable.
5040  Comment cmnt(masm_, "[ CatchExtensionObject");
5041  Load(node->key());
5042  Load(node->value());
5043  Result result =
5044      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
5045  frame_->Push(&result);
5046}
5047
5048
5049void CodeGenerator::EmitSlotAssignment(Assignment* node) {
5050#ifdef DEBUG
5051  int original_height = frame()->height();
5052#endif
5053  Comment cmnt(masm(), "[ Variable Assignment");
5054  Variable* var = node->target()->AsVariableProxy()->AsVariable();
5055  ASSERT(var != NULL);
5056  Slot* slot = var->AsSlot();
5057  ASSERT(slot != NULL);
5058
5059  // Evaluate the right-hand side.
5060  if (node->is_compound()) {
5061    // For a compound assignment the right-hand side is a binary operation
5062    // between the current property value and the actual right-hand side.
5063    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
5064    Load(node->value());
5065
5066    // Perform the binary operation.
5067    bool overwrite_value = node->value()->ResultOverwriteAllowed();
5068    // Construct the implicit binary operation.
5069    BinaryOperation expr(node);
5070    GenericBinaryOperation(&expr,
5071                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
5072  } else {
5073    // For non-compound assignment just load the right-hand side.
5074    Load(node->value());
5075  }
5076
5077  // Perform the assignment.
5078  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
5079    CodeForSourcePosition(node->position());
5080    StoreToSlot(slot,
5081                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
5082  }
5083  ASSERT(frame()->height() == original_height + 1);
5084}
5085
5086
5087void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
5088#ifdef DEBUG
5089  int original_height = frame()->height();
5090#endif
5091  Comment cmnt(masm(), "[ Named Property Assignment");
5092  Variable* var = node->target()->AsVariableProxy()->AsVariable();
5093  Property* prop = node->target()->AsProperty();
5094  ASSERT(var == NULL || (prop == NULL && var->is_global()));
5095
5096  // Initialize name and evaluate the receiver sub-expression if necessary. If
5097  // the receiver is trivial it is not placed on the stack at this point, but
5098  // loaded whenever actually needed.
5099  Handle<String> name;
5100  bool is_trivial_receiver = false;
5101  if (var != NULL) {
5102    name = var->name();
5103  } else {
5104    Literal* lit = prop->key()->AsLiteral();
5105    ASSERT_NOT_NULL(lit);
5106    name = Handle<String>::cast(lit->handle());
5107    // Do not materialize the receiver on the frame if it is trivial.
5108    is_trivial_receiver = prop->obj()->IsTrivial();
5109    if (!is_trivial_receiver) Load(prop->obj());
5110  }
5111
5112  // Change to slow case at the beginning of an initialization block to
5113  // avoid the quadratic behavior of repeatedly adding fast properties.
5114  if (node->starts_initialization_block()) {
5115    // An initialization block consists of assignments of the form
5116    // expr.x = ..., so this will never be an assignment to a variable
5117    // and there must be a receiver object.
5118    ASSERT_EQ(NULL, var);
5119    if (is_trivial_receiver) {
5120      frame()->Push(prop->obj());
5121    } else {
5122      frame()->Dup();
5123    }
5124    Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
5125  }
5126
5127  // Change to fast case at the end of an initialization block. To prepare for
5128  // that add an extra copy of the receiver to the frame, so that it can be
5129  // converted back to fast case after the assignment.
5130  if (node->ends_initialization_block() && !is_trivial_receiver) {
5131    frame()->Dup();
5132  }
5133
5134  // Stack layout:
5135  // [tos]   : receiver (only materialized if non-trivial)
5136  // [tos+1] : receiver if at the end of an initialization block
5137
5138  // Evaluate the right-hand side.
5139  if (node->is_compound()) {
5140    // For a compound assignment the right-hand side is a binary operation
5141    // between the current property value and the actual right-hand side.
5142    if (is_trivial_receiver) {
5143      frame()->Push(prop->obj());
5144    } else if (var != NULL) {
5145      // The LoadIC stub expects the object in rax.
5146      // Freeing rax causes the code generator to load the global into it.
5147      frame_->Spill(rax);
5148      LoadGlobal();
5149    } else {
5150      frame()->Dup();
5151    }
5152    Result value = EmitNamedLoad(name, var != NULL);
5153    frame()->Push(&value);
5154    Load(node->value());
5155
5156    bool overwrite_value = node->value()->ResultOverwriteAllowed();
5157    // Construct the implicit binary operation.
5158    BinaryOperation expr(node);
5159    GenericBinaryOperation(&expr,
5160                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
5161  } else {
5162    // For non-compound assignment just load the right-hand side.
5163    Load(node->value());
5164  }
5165
5166  // Stack layout:
5167  // [tos]   : value
5168  // [tos+1] : receiver (only materialized if non-trivial)
5169  // [tos+2] : receiver if at the end of an initialization block
5170
5171  // Perform the assignment.  It is safe to ignore constants here.
5172  ASSERT(var == NULL || var->mode() != Variable::CONST);
5173  ASSERT_NE(Token::INIT_CONST, node->op());
5174  if (is_trivial_receiver) {
5175    Result value = frame()->Pop();
5176    frame()->Push(prop->obj());
5177    frame()->Push(&value);
5178  }
5179  CodeForSourcePosition(node->position());
5180  bool is_contextual = (var != NULL);
5181  Result answer = EmitNamedStore(name, is_contextual);
5182  frame()->Push(&answer);
5183
5184  // Stack layout:
5185  // [tos]   : result
5186  // [tos+1] : receiver if at the end of an initialization block
5187
5188  if (node->ends_initialization_block()) {
5189    ASSERT_EQ(NULL, var);
5190    // The argument to the runtime call is the receiver.
5191    if (is_trivial_receiver) {
5192      frame()->Push(prop->obj());
5193    } else {
5194      // A copy of the receiver is below the value of the assignment.  Swap
5195      // the receiver and the value of the assignment expression.
5196      Result result = frame()->Pop();
5197      Result receiver = frame()->Pop();
5198      frame()->Push(&result);
5199      frame()->Push(&receiver);
5200    }
5201    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
5202  }
5203
5204  // Stack layout:
5205  // [tos]   : result
5206
5207  ASSERT_EQ(frame()->height(), original_height + 1);
5208}
5209
5210
5211void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
5212#ifdef DEBUG
5213  int original_height = frame()->height();
5214#endif
5215  Comment cmnt(masm_, "[ Keyed Property Assignment");
5216  Property* prop = node->target()->AsProperty();
5217  ASSERT_NOT_NULL(prop);
5218
5219  // Evaluate the receiver subexpression.
5220  Load(prop->obj());
5221
5222  // Change to slow case at the beginning of an initialization block to
5223  // avoid the quadratic behavior of repeatedly adding fast properties.
5224  if (node->starts_initialization_block()) {
5225    frame_->Dup();
5226    Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
5227  }
5228
5229  // Change to fast case at the end of an initialization block. To prepare for
5230  // that add an extra copy of the receiver to the frame, so that it can be
5231  // converted back to fast case after the assignment.
5232  if (node->ends_initialization_block()) {
5233    frame_->Dup();
5234  }
5235
5236  // Evaluate the key subexpression.
5237  Load(prop->key());
5238
5239  // Stack layout:
5240  // [tos]   : key
5241  // [tos+1] : receiver
5242  // [tos+2] : receiver if at the end of an initialization block
5243
5244  // Evaluate the right-hand side.
5245  if (node->is_compound()) {
5246    // For a compound assignment the right-hand side is a binary operation
5247    // between the current property value and the actual right-hand side.
5248    // Duplicate receiver and key for loading the current property value.
5249    frame()->PushElementAt(1);
5250    frame()->PushElementAt(1);
5251    Result value = EmitKeyedLoad();
5252    frame()->Push(&value);
5253    Load(node->value());
5254
5255    // Perform the binary operation.
5256    bool overwrite_value = node->value()->ResultOverwriteAllowed();
5257    BinaryOperation expr(node);
5258    GenericBinaryOperation(&expr,
5259                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
5260  } else {
5261    // For non-compound assignment just load the right-hand side.
5262    Load(node->value());
5263  }
5264
5265  // Stack layout:
5266  // [tos]   : value
5267  // [tos+1] : key
5268  // [tos+2] : receiver
5269  // [tos+3] : receiver if at the end of an initialization block
5270
5271  // Perform the assignment.  It is safe to ignore constants here.
5272  ASSERT(node->op() != Token::INIT_CONST);
5273  CodeForSourcePosition(node->position());
5274  Result answer = EmitKeyedStore(prop->key()->type());
5275  frame()->Push(&answer);
5276
5277  // Stack layout:
5278  // [tos]   : result
5279  // [tos+1] : receiver if at the end of an initialization block
5280
5281  // Change to fast case at the end of an initialization block.
5282  if (node->ends_initialization_block()) {
5283    // The argument to the runtime call is the extra copy of the receiver,
5284    // which is below the value of the assignment.  Swap the receiver and
5285    // the value of the assignment expression.
5286    Result result = frame()->Pop();
5287    Result receiver = frame()->Pop();
5288    frame()->Push(&result);
5289    frame()->Push(&receiver);
5290    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
5291  }
5292
5293  // Stack layout:
5294  // [tos]   : result
5295
5296  ASSERT(frame()->height() == original_height + 1);
5297}
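
// The Pop/Push sequence above swaps the two top frame elements so that
// the receiver ends up on top for the kToFastProperties call.  An
// equivalent sketch on plain values:
namespace stack_swap_example {
static inline void SwapTopTwo(int* tos, int* tos1) {
  int result = *tos;
  *tos = *tos1;    // receiver now on top, consumed by the runtime call
  *tos1 = result;  // assignment result stays behind as the final value
}
}  // namespace stack_swap_example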
5298
5299
5300void CodeGenerator::VisitAssignment(Assignment* node) {
5301#ifdef DEBUG
5302  int original_height = frame()->height();
5303#endif
5304  Variable* var = node->target()->AsVariableProxy()->AsVariable();
5305  Property* prop = node->target()->AsProperty();
5306
5307  if (var != NULL && !var->is_global()) {
5308    EmitSlotAssignment(node);
5309
5310  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
5311             (var != NULL && var->is_global())) {
5312    // Properties whose keys are property names and global variables are
5313    // treated as named property references.  We do not need to consider
5314    // global 'this' because it is not a valid left-hand side.
5315    EmitNamedPropertyAssignment(node);
5316
5317  } else if (prop != NULL) {
5318    // Other properties (including rewritten parameters for a function that
5319    // uses arguments) are keyed property assignments.
5320    EmitKeyedPropertyAssignment(node);
5321
5322  } else {
5323    // Invalid left-hand side.
5324    Load(node->target());
5325    Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
5326    // The runtime call doesn't actually return but the code generator will
5327    // still generate code and expects a certain frame height.
5328    frame()->Push(&result);
5329  }
5330
5331  ASSERT(frame()->height() == original_height + 1);
5332}
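
// VisitAssignment above dispatches on the shape of the left-hand side.
// A sketch of the same classification; the enum is illustrative and not
// part of the code generator:
namespace assignment_dispatch_example {
enum TargetKind { kSlot, kNamedProperty, kKeyedProperty, kInvalid };
static inline TargetKind Classify(bool is_variable, bool is_global,
                                  bool is_property, bool key_is_name) {
  if (is_variable && !is_global) return kSlot;
  if ((is_property && key_is_name) || (is_variable && is_global)) {
    return kNamedProperty;
  }
  if (is_property) return kKeyedProperty;
  return kInvalid;  // compiled as a load plus ThrowReferenceError
}
}  // namespace assignment_dispatch_example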
5333
5334
5335void CodeGenerator::VisitThrow(Throw* node) {
5336  Comment cmnt(masm_, "[ Throw");
5337  Load(node->exception());
5338  Result result = frame_->CallRuntime(Runtime::kThrow, 1);
5339  frame_->Push(&result);
5340}
5341
5342
5343void CodeGenerator::VisitProperty(Property* node) {
5344  Comment cmnt(masm_, "[ Property");
5345  Reference property(this, node);
5346  property.GetValue();
5347}
5348
5349
5350void CodeGenerator::VisitCall(Call* node) {
5351  Comment cmnt(masm_, "[ Call");
5352
5353  ZoneList<Expression*>* args = node->arguments();
5354
5355  // Check if the function is a variable or a property.
5356  Expression* function = node->expression();
5357  Variable* var = function->AsVariableProxy()->AsVariable();
5358  Property* property = function->AsProperty();
5359
5360  // ------------------------------------------------------------------------
5361  // Fast-case: Use inline caching.
5362  // ---
5363  // According to ECMA-262, section 11.2.3, page 44, the function to call
5364  // must be resolved after the arguments have been evaluated. The IC code
5365  // automatically handles this by loading the arguments before the function
5366  // is resolved in cache misses (this also holds for megamorphic calls).
5367  // ------------------------------------------------------------------------
5368
5369  if (var != NULL && var->is_possibly_eval()) {
5370    // ----------------------------------
5371    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
5372    // ----------------------------------
5373
5374    // In a call to eval, we first call %ResolvePossiblyDirectEval to
5375    // resolve the function we need to call and the receiver of the
5376    // call.  Then we call the resolved function using the given
5377    // arguments.
5378
5379    // Prepare the stack for the call to the resolved function.
5380    Load(function);
5381
5382    // Allocate a frame slot for the receiver.
5383    frame_->Push(FACTORY->undefined_value());
5384
5385    // Load the arguments.
5386    int arg_count = args->length();
5387    for (int i = 0; i < arg_count; i++) {
5388      Load(args->at(i));
5389      frame_->SpillTop();
5390    }
5391
5392    // Result to hold the result of the function resolution and the
5393    // final result of the eval call.
5394    Result result;
5395
5396    // If we know that eval can only be shadowed by eval-introduced
5397    // variables we attempt to load the global eval function directly
5398    // in generated code. If we succeed, there is no need to perform a
5399    // context lookup in the runtime system.
5400    JumpTarget done;
5401    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
5402      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
5403      JumpTarget slow;
5404      // Prepare the stack for the call to
5405      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
5406      // function, the first argument to the eval call and the
5407      // receiver.
5408      Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
5409                                                     NOT_INSIDE_TYPEOF,
5410                                                     &slow);
5411      frame_->Push(&fun);
5412      if (arg_count > 0) {
5413        frame_->PushElementAt(arg_count);
5414      } else {
5415        frame_->Push(FACTORY->undefined_value());
5416      }
5417      frame_->PushParameterAt(-1);
5418
5419      // Push the strict mode flag.
5420      frame_->Push(Smi::FromInt(strict_mode_flag()));
5421
5422      // Resolve the call.
5423      result =
5424          frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
5425
5426      done.Jump(&result);
5427      slow.Bind();
5428    }
5429
5430    // Prepare the stack for the call to ResolvePossiblyDirectEval by
5431    // pushing the loaded function, the first argument to the eval
5432    // call and the receiver.
5433    frame_->PushElementAt(arg_count + 1);
5434    if (arg_count > 0) {
5435      frame_->PushElementAt(arg_count);
5436    } else {
5437      frame_->Push(FACTORY->undefined_value());
5438    }
5439    frame_->PushParameterAt(-1);
5440
5441    // Push the strict mode flag.
5442    frame_->Push(Smi::FromInt(strict_mode_flag()));
5443
5444    // Resolve the call.
5445    result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
5446
5447    // If we generated fast-case code bind the jump-target where fast
5448    // and slow case merge.
5449    if (done.is_linked()) done.Bind(&result);
5450
5451    // The runtime call returns a pair of values in rax (function) and
5452    // rdx (receiver). Touch up the stack with the right values.
5453    Result receiver = allocator_->Allocate(rdx);
5454    frame_->SetElementAt(arg_count + 1, &result);
5455    frame_->SetElementAt(arg_count, &receiver);
5456    receiver.Unuse();
5457
5458    // Call the function.
5459    CodeForSourcePosition(node->position());
5460    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
5461    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
5462    result = frame_->CallStub(&call_function, arg_count + 1);
5463
5464    // Restore the context and overwrite the function on the stack with
5465    // the result.
5466    frame_->RestoreContextRegister();
5467    frame_->SetElementAt(0, &result);
5468
5469  } else if (var != NULL && !var->is_this() && var->is_global()) {
5470    // ----------------------------------
5471    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
5472    // ----------------------------------
5473
5474    // Pass the global object as the receiver and let the IC stub
5475    // patch the stack to use the global proxy as 'this' in the
5476    // invoked function.
5477    LoadGlobal();
5478
5479    // Load the arguments.
5480    int arg_count = args->length();
5481    for (int i = 0; i < arg_count; i++) {
5482      Load(args->at(i));
5483      frame_->SpillTop();
5484    }
5485
5486    // Push the name of the function on the frame.
5487    frame_->Push(var->name());
5488
5489    // Call the IC initialization code.
5490    CodeForSourcePosition(node->position());
5491    Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
5492                                       arg_count,
5493                                       loop_nesting());
5494    frame_->RestoreContextRegister();
5495    // Replace the function on the stack with the result.
5496    frame_->Push(&result);
5497
5498  } else if (var != NULL && var->AsSlot() != NULL &&
5499             var->AsSlot()->type() == Slot::LOOKUP) {
5500    // ----------------------------------
5501    // JavaScript examples:
5502    //
5503    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
5504    //
5505    //  function f() {};
5506    //  function g() {
5507    //    eval(...);
5508    //    f();  // f could be in extension object.
5509    //  }
5510    // ----------------------------------
5511
5512    JumpTarget slow, done;
5513    Result function;
5514
5515    // Generate fast case for loading functions from slots that
5516    // correspond to local/global variables or arguments unless they
5517    // are shadowed by eval-introduced bindings.
5518    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
5519                                    NOT_INSIDE_TYPEOF,
5520                                    &function,
5521                                    &slow,
5522                                    &done);
5523
5524    slow.Bind();
5525    // Load the function from the context.  Sync the frame so we can
5526    // push the arguments directly into place.
5527    frame_->SyncRange(0, frame_->element_count() - 1);
5528    frame_->EmitPush(rsi);
5529    frame_->EmitPush(var->name());
5530    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
5531    // The runtime call returns a pair of values in rax and rdx.  The
5532    // looked-up function is in rax and the receiver is in rdx.  These
5533    // register references are not ref counted here.  We spill them
5534    // eagerly since they are arguments to an inevitable call (and are
5535    // not sharable by the arguments).
5536    ASSERT(!allocator()->is_used(rax));
5537    frame_->EmitPush(rax);
5538
5539    // Load the receiver.
5540    ASSERT(!allocator()->is_used(rdx));
5541    frame_->EmitPush(rdx);
5542
5543    // If fast case code has been generated, emit code to push the
5544    // function and receiver and have the slow path jump around this
5545    // code.
5546    if (done.is_linked()) {
5547      JumpTarget call;
5548      call.Jump();
5549      done.Bind(&function);
5550      frame_->Push(&function);
5551      LoadGlobalReceiver();
5552      call.Bind();
5553    }
5554
5555    // Call the function.
5556    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
5557
5558  } else if (property != NULL) {
5559    // Check if the key is a literal string.
5560    Literal* literal = property->key()->AsLiteral();
5561
5562    if (literal != NULL && literal->handle()->IsSymbol()) {
5563      // ------------------------------------------------------------------
5564      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
5565      // ------------------------------------------------------------------
5566
5567      Handle<String> name = Handle<String>::cast(literal->handle());
5568
5569      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
5570          name->IsEqualTo(CStrVector("apply")) &&
5571          args->length() == 2 &&
5572          args->at(1)->AsVariableProxy() != NULL &&
5573          args->at(1)->AsVariableProxy()->IsArguments()) {
5574        // Use the optimized Function.prototype.apply that avoids
5575        // allocating lazily allocated arguments objects.
5576        CallApplyLazy(property->obj(),
5577                      args->at(0),
5578                      args->at(1)->AsVariableProxy(),
5579                      node->position());
5580
5581      } else {
5582        // Push the receiver onto the frame.
5583        Load(property->obj());
5584
5585        // Load the arguments.
5586        int arg_count = args->length();
5587        for (int i = 0; i < arg_count; i++) {
5588          Load(args->at(i));
5589          frame_->SpillTop();
5590        }
5591
5592        // Push the name of the function onto the frame.
5593        frame_->Push(name);
5594
5595        // Call the IC initialization code.
5596        CodeForSourcePosition(node->position());
5597        Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
5598                                           arg_count,
5599                                           loop_nesting());
5600        frame_->RestoreContextRegister();
5601        frame_->Push(&result);
5602      }
5603
5604    } else {
5605      // -------------------------------------------
5606      // JavaScript example: 'array[index](1, 2, 3)'
5607      // -------------------------------------------
5608
5609      // Load the function to call from the property through a reference.
5610      if (property->is_synthetic()) {
5611        Reference ref(this, property, false);
5612        ref.GetValue();
5613        // Use global object as receiver.
5614        LoadGlobalReceiver();
5615        // Call the function.
5616        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
5617      } else {
5618        // Push the receiver onto the frame.
5619        Load(property->obj());
5620
5621        // Load the name of the function.
5622        Load(property->key());
5623
5624        // Swap the name of the function and the receiver on the stack
5625        // to follow the calling convention for call ICs.
5626        Result key = frame_->Pop();
5627        Result receiver = frame_->Pop();
5628        frame_->Push(&key);
5629        frame_->Push(&receiver);
5630        key.Unuse();
5631        receiver.Unuse();
5632
5633        // Load the arguments.
5634        int arg_count = args->length();
5635        for (int i = 0; i < arg_count; i++) {
5636          Load(args->at(i));
5637          frame_->SpillTop();
5638        }
5639
5640        // Place the key on top of stack and call the IC initialization code.
5641        frame_->PushElementAt(arg_count + 1);
5642        CodeForSourcePosition(node->position());
5643        Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
5644                                                arg_count,
5645                                                loop_nesting());
5646        frame_->Drop();  // Drop the key still on the stack.
5647        frame_->RestoreContextRegister();
5648        frame_->Push(&result);
5649      }
5650    }
5651  } else {
5652    // ----------------------------------
5653    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
5654    // ----------------------------------
5655
5656    // Load the function.
5657    Load(function);
5658
5659    // Pass the global proxy as the receiver.
5660    LoadGlobalReceiver();
5661
5662    // Call the function.
5663    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
5664  }
5665}
5666
5667
5668void CodeGenerator::VisitCallNew(CallNew* node) {
5669  Comment cmnt(masm_, "[ CallNew");
5670
5671  // According to ECMA-262, section 11.2.2, page 44, the function
5672  // expression in new calls must be evaluated before the
5673  // arguments. This is different from ordinary calls, where the
5674  // actual function to call is resolved after the arguments have been
5675  // evaluated.
5676
5677  // Push constructor on the stack.  If it's not a function it's used as
5678  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
5679  // ignored.
5680  Load(node->expression());
5681
5682  // Push the arguments ("left-to-right") on the stack.
5683  ZoneList<Expression*>* args = node->arguments();
5684  int arg_count = args->length();
5685  for (int i = 0; i < arg_count; i++) {
5686    Load(args->at(i));
5687  }
5688
5689  // Call the construct call builtin that handles allocation and
5690  // constructor invocation.
5691  CodeForSourcePosition(node->position());
5692  Result result = frame_->CallConstructor(arg_count);
5693  frame_->Push(&result);
5694}
5695
5696
5697void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
5698  ASSERT(args->length() == 1);
5699  Load(args->at(0));
5700  Result value = frame_->Pop();
5701  value.ToRegister();
5702  ASSERT(value.is_valid());
5703  Condition is_smi = masm_->CheckSmi(value.reg());
5704  value.Unuse();
5705  destination()->Split(is_smi);
5706}
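
// CheckSmi above tests the tag of a value word.  A sketch assuming the
// x64 convention that a smi has a zero low tag bit (illustration only):
namespace smi_check_example {
static inline bool IsSmiWord(long word) {
  return (word & 1) == 0;  // clear tag bit: smi; set tag bit: heap object
}
}  // namespace smi_check_example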
5707
5708
5709void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
5710  // Conditionally generate a log call.
5711  // Args:
5712  //   0 (literal string): The type of logging (corresponds to the flags).
5713  //     This is used to determine whether or not to generate the log call.
5714  //   1 (string): Format string.  Access the string at argument index 2
5715  //     with '%2s' (see Logger::LogRuntime for all the formats).
5716  //   2 (array): Arguments to the format string.
5717  ASSERT_EQ(args->length(), 3);
5718#ifdef ENABLE_LOGGING_AND_PROFILING
5719  if (ShouldGenerateLog(args->at(0))) {
5720    Load(args->at(1));
5721    Load(args->at(2));
5722    frame_->CallRuntime(Runtime::kLog, 2);
5723  }
5724#endif
5725  // Finally, we're expected to leave a value on the top of the stack.
5726  frame_->Push(FACTORY->undefined_value());
5727}
5728
5729
5730void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
5731  ASSERT(args->length() == 1);
5732  Load(args->at(0));
5733  Result value = frame_->Pop();
5734  value.ToRegister();
5735  ASSERT(value.is_valid());
5736  Condition non_negative_smi = masm_->CheckNonNegativeSmi(value.reg());
5737  value.Unuse();
5738  destination()->Split(non_negative_smi);
5739}
5740
5741
5742class DeferredStringCharCodeAt : public DeferredCode {
5743 public:
5744  DeferredStringCharCodeAt(Register object,
5745                           Register index,
5746                           Register scratch,
5747                           Register result)
5748      : result_(result),
5749        char_code_at_generator_(object,
5750                                index,
5751                                scratch,
5752                                result,
5753                                &need_conversion_,
5754                                &need_conversion_,
5755                                &index_out_of_range_,
5756                                STRING_INDEX_IS_NUMBER) {}
5757
5758  StringCharCodeAtGenerator* fast_case_generator() {
5759    return &char_code_at_generator_;
5760  }
5761
5762  virtual void Generate() {
5763    VirtualFrameRuntimeCallHelper call_helper(frame_state());
5764    char_code_at_generator_.GenerateSlow(masm(), call_helper);
5765
5766    __ bind(&need_conversion_);
5767    // Move the undefined value into the result register, which will
5768    // trigger conversion.
5769    __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
5770    __ jmp(exit_label());
5771
5772    __ bind(&index_out_of_range_);
5773    // When the index is out of range, the spec requires us to return
5774    // NaN.
5775    __ LoadRoot(result_, Heap::kNanValueRootIndex);
5776    __ jmp(exit_label());
5777  }
5778
5779 private:
5780  Register result_;
5781
5782  Label need_conversion_;
5783  Label index_out_of_range_;
5784
5785  StringCharCodeAtGenerator char_code_at_generator_;
5786};
5787
5788
5789// This generates code that performs a String.prototype.charCodeAt() call
5790// or returns a smi in order to trigger conversion.
5791void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
5792  Comment(masm_, "[ GenerateStringCharCodeAt");
5793  ASSERT(args->length() == 2);
5794
5795  Load(args->at(0));
5796  Load(args->at(1));
5797  Result index = frame_->Pop();
5798  Result object = frame_->Pop();
5799  object.ToRegister();
5800  index.ToRegister();
5801  // We might mutate the object register.
5802  frame_->Spill(object.reg());
5803
5804  // We need two extra registers.
5805  Result result = allocator()->Allocate();
5806  ASSERT(result.is_valid());
5807  Result scratch = allocator()->Allocate();
5808  ASSERT(scratch.is_valid());
5809
5810  DeferredStringCharCodeAt* deferred =
5811      new DeferredStringCharCodeAt(object.reg(),
5812                                   index.reg(),
5813                                   scratch.reg(),
5814                                   result.reg());
5815  deferred->fast_case_generator()->GenerateFast(masm_);
5816  deferred->BindExit();
5817  frame_->Push(&result);
5818}
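
// The deferred-code pattern above keeps the common case inline and moves
// conversion and range failures out of line.  A scalar sketch of the
// charCodeAt semantics being generated (illustration only):
namespace char_code_at_example {
static inline int CharCodeAt(const char* chars, int length, int index) {
  if (index < 0 || index >= length) return -1;  // NaN in the generated code
  return static_cast<unsigned char>(chars[index]);
}
}  // namespace char_code_at_example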
5819
5820
5821class DeferredStringCharFromCode : public DeferredCode {
5822 public:
5823  DeferredStringCharFromCode(Register code,
5824                             Register result)
5825      : char_from_code_generator_(code, result) {}
5826
5827  StringCharFromCodeGenerator* fast_case_generator() {
5828    return &char_from_code_generator_;
5829  }
5830
5831  virtual void Generate() {
5832    VirtualFrameRuntimeCallHelper call_helper(frame_state());
5833    char_from_code_generator_.GenerateSlow(masm(), call_helper);
5834  }
5835
5836 private:
5837  StringCharFromCodeGenerator char_from_code_generator_;
5838};
5839
5840
5841// Generates code for creating a one-char string from a char code.
5842void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
5843  Comment(masm_, "[ GenerateStringCharFromCode");
5844  ASSERT(args->length() == 1);
5845
5846  Load(args->at(0));
5847
5848  Result code = frame_->Pop();
5849  code.ToRegister();
5850  ASSERT(code.is_valid());
5851
5852  Result result = allocator()->Allocate();
5853  ASSERT(result.is_valid());
5854
5855  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
5856      code.reg(), result.reg());
5857  deferred->fast_case_generator()->GenerateFast(masm_);
5858  deferred->BindExit();
5859  frame_->Push(&result);
5860}
5861
5862
5863class DeferredStringCharAt : public DeferredCode {
5864 public:
5865  DeferredStringCharAt(Register object,
5866                       Register index,
5867                       Register scratch1,
5868                       Register scratch2,
5869                       Register result)
5870      : result_(result),
5871        char_at_generator_(object,
5872                           index,
5873                           scratch1,
5874                           scratch2,
5875                           result,
5876                           &need_conversion_,
5877                           &need_conversion_,
5878                           &index_out_of_range_,
5879                           STRING_INDEX_IS_NUMBER) {}
5880
5881  StringCharAtGenerator* fast_case_generator() {
5882    return &char_at_generator_;
5883  }
5884
5885  virtual void Generate() {
5886    VirtualFrameRuntimeCallHelper call_helper(frame_state());
5887    char_at_generator_.GenerateSlow(masm(), call_helper);
5888
5889    __ bind(&need_conversion_);
5890    // Move smi zero into the result register, which will trigger
5891    // conversion.
5892    __ Move(result_, Smi::FromInt(0));
5893    __ jmp(exit_label());
5894
5895    __ bind(&index_out_of_range_);
5896    // When the index is out of range, the spec requires us to return
5897    // the empty string.
5898    __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
5899    __ jmp(exit_label());
5900  }
5901
5902 private:
5903  Register result_;
5904
5905  Label need_conversion_;
5906  Label index_out_of_range_;
5907
5908  StringCharAtGenerator char_at_generator_;
5909};
5910
5911
5912// This generates code that performs a String.prototype.charAt() call
5913// or returns a smi in order to trigger conversion.
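// For illustration (not part of the generated code): 'abc'.charAt(1) yields
// 'b' on the fast path, while 'abc'.charAt(10) takes the
// index_out_of_range_ path above and yields the empty string.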
5914void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
5915  Comment(masm_, "[ GenerateStringCharAt");
5916  ASSERT(args->length() == 2);
5917
5918  Load(args->at(0));
5919  Load(args->at(1));
5920  Result index = frame_->Pop();
5921  Result object = frame_->Pop();
5922  object.ToRegister();
5923  index.ToRegister();
5924  // We might mutate the object register.
5925  frame_->Spill(object.reg());
5926
5927  // We need three extra registers.
5928  Result result = allocator()->Allocate();
5929  ASSERT(result.is_valid());
5930  Result scratch1 = allocator()->Allocate();
5931  ASSERT(scratch1.is_valid());
5932  Result scratch2 = allocator()->Allocate();
5933  ASSERT(scratch2.is_valid());
5934
5935  DeferredStringCharAt* deferred =
5936      new DeferredStringCharAt(object.reg(),
5937                               index.reg(),
5938                               scratch1.reg(),
5939                               scratch2.reg(),
5940                               result.reg());
5941  deferred->fast_case_generator()->GenerateFast(masm_);
5942  deferred->BindExit();
5943  frame_->Push(&result);
5944}
5945
5946
5947void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
5948  ASSERT(args->length() == 1);
5949  Load(args->at(0));
5950  Result value = frame_->Pop();
5951  value.ToRegister();
5952  ASSERT(value.is_valid());
5953  Condition is_smi = masm_->CheckSmi(value.reg());
5954  destination()->false_target()->Branch(is_smi);
5955  // It is a heap object - get map.
5956  // Check if the object is a JS array or not.
5957  __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
5958  value.Unuse();
5959  destination()->Split(equal);
5960}
5961
5962
5963void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
5964  ASSERT(args->length() == 1);
5965  Load(args->at(0));
5966  Result value = frame_->Pop();
5967  value.ToRegister();
5968  ASSERT(value.is_valid());
5969  Condition is_smi = masm_->CheckSmi(value.reg());
5970  destination()->false_target()->Branch(is_smi);
5971  // It is a heap object - get map.
5972  // Check if the object is a regexp.
5973  __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
5974  value.Unuse();
5975  destination()->Split(equal);
5976}
5977
5978
5979void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
5980  // This generates a fast version of:
5981  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
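  // Note: typeof null is 'object', hence the explicit null check below,
  // while undetectable objects masquerade as undefined under typeof and must
  // answer false, hence the Map::kIsUndetectable test.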
5982  ASSERT(args->length() == 1);
5983  Load(args->at(0));
5984  Result obj = frame_->Pop();
5985  obj.ToRegister();
5986  Condition is_smi = masm_->CheckSmi(obj.reg());
5987  destination()->false_target()->Branch(is_smi);
5988
5989  __ Move(kScratchRegister, FACTORY->null_value());
5990  __ cmpq(obj.reg(), kScratchRegister);
5991  destination()->true_target()->Branch(equal);
5992
5993  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
5994  // Undetectable objects behave like undefined when tested with typeof.
5995  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
5996          Immediate(1 << Map::kIsUndetectable));
5997  destination()->false_target()->Branch(not_zero);
5998  __ movzxbq(kScratchRegister,
5999             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
6000  __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
6001  destination()->false_target()->Branch(below);
6002  __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
6003  obj.Unuse();
6004  destination()->Split(below_equal);
6005}
6006
6007
6008void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
6009  // This generates a fast version of:
6010  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
6011// typeof(arg) === 'function').
6012  // It includes undetectable objects (as opposed to IsObject).
6013  ASSERT(args->length() == 1);
6014  Load(args->at(0));
6015  Result value = frame_->Pop();
6016  value.ToRegister();
6017  ASSERT(value.is_valid());
6018  Condition is_smi = masm_->CheckSmi(value.reg());
6019  destination()->false_target()->Branch(is_smi);
6020  // Check that this is an object.
6021  __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
6022  value.Unuse();
6023  destination()->Split(above_equal);
6024}
6025
6026
6027// Deferred code to check whether a String wrapper object is safe to use
6028// the default valueOf on. It runs after the bit caching this information in
6029// the map has been tested, with the object's map loaded in the map_result_
6030// register. On return the register map_result_ contains 1 for true and 0
6031// for false.
6032class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
6033 public:
6034  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
6035                                               Register map_result,
6036                                               Register scratch1,
6037                                               Register scratch2)
6038      : object_(object),
6039        map_result_(map_result),
6040        scratch1_(scratch1),
6041        scratch2_(scratch2) { }
6042
6043  virtual void Generate() {
6044    Label false_result;
6045
6046    // Check that map is loaded as expected.
6047    if (FLAG_debug_code) {
6048      __ cmpq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
6049      __ Assert(equal, "Map not in expected register");
6050    }
6051
6052    // Check for fast case object. Generate false result for slow case object.
6053    __ movq(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
6054    __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
6055    __ CompareRoot(scratch1_, Heap::kHashTableMapRootIndex);
6056    __ j(equal, &false_result);
6057
6058    // Look for the valueOf symbol in the descriptor array, and indicate
6059    // false if found. The descriptor type is not checked, so a transition
6060    // named 'valueOf' also yields false - a harmless false negative.
6061    __ movq(map_result_,
6062           FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
6063    __ movq(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
6064    // map_result_: descriptor array
6065    // scratch1_: length of descriptor array
6066    // Calculate the end of the descriptor array.
6067    SmiIndex index = masm_->SmiToIndex(scratch2_, scratch1_, kPointerSizeLog2);
6068    __ lea(scratch1_,
6069           Operand(
6070               map_result_, index.reg, index.scale, FixedArray::kHeaderSize));
6071    // Calculate location of the first key name.
6072    __ addq(map_result_,
6073            Immediate(FixedArray::kHeaderSize +
6074                      DescriptorArray::kFirstIndex * kPointerSize));
6075    // Loop through all the keys in the descriptor array. If one of them is
6076    // the symbol 'valueOf', the result is false.
6077    Label entry, loop;
6078    __ jmp(&entry);
6079    __ bind(&loop);
6080    __ movq(scratch2_, FieldOperand(map_result_, 0));
6081    __ Cmp(scratch2_, FACTORY->value_of_symbol());
6082    __ j(equal, &false_result);
6083    __ addq(map_result_, Immediate(kPointerSize));
6084    __ bind(&entry);
6085    __ cmpq(map_result_, scratch1_);
6086    __ j(not_equal, &loop);
6087
6088    // Reload map as register map_result_ was used as temporary above.
6089    __ movq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
6090
6091    // If a valueOf property is not found on the object, check that its
6092    // prototype is the unmodified String prototype; if not, result is false.
6093    __ movq(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
6094    __ testq(scratch1_, Immediate(kSmiTagMask));
6095    __ j(zero, &false_result);
6096    __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
6097    __ movq(scratch2_,
6098            Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
6099    __ movq(scratch2_,
6100            FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
6101    __ cmpq(scratch1_,
6102            ContextOperand(
6103                scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
6104    __ j(not_equal, &false_result);
6105    // Set the bit in the map to indicate that it has been checked safe for
6106    // default valueOf and set true result.
6107    __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
6108           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
6109    __ Set(map_result_, 1);
6110    __ jmp(exit_label());
6111    __ bind(&false_result);
6112    // Set false result.
6113    __ Set(map_result_, 0);
6114  }
6115
6116 private:
6117  Register object_;
6118  Register map_result_;
6119  Register scratch1_;
6120  Register scratch2_;
6121};
6122
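// Illustration of what the check above protects: for o = new String("x"),
// builtins may use the wrapped "x" directly only while neither o's own
// properties nor String.prototype provide a custom 'valueOf'; the descriptor
// scan and the prototype-map comparison verify exactly that before the
// cached map bit is set.
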
6123
6124void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
6125    ZoneList<Expression*>* args) {
6126  ASSERT(args->length() == 1);
6127  Load(args->at(0));
6128  Result obj = frame_->Pop();  // Pop the string wrapper.
6129  obj.ToRegister();
6130  ASSERT(obj.is_valid());
6131  if (FLAG_debug_code) {
6132    __ AbortIfSmi(obj.reg());
6133  }
6134
6135  // Check whether this map has already been checked to be safe for default
6136  // valueOf.
6137  Result map_result = allocator()->Allocate();
6138  ASSERT(map_result.is_valid());
6139  __ movq(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
6140  __ testb(FieldOperand(map_result.reg(), Map::kBitField2Offset),
6141           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
6142  destination()->true_target()->Branch(not_zero);
6143
6144  // We need an additional two scratch registers for the deferred code.
6145  Result temp1 = allocator()->Allocate();
6146  ASSERT(temp1.is_valid());
6147  Result temp2 = allocator()->Allocate();
6148  ASSERT(temp2.is_valid());
6149
6150  DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
6151      new DeferredIsStringWrapperSafeForDefaultValueOf(
6152          obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
6153  deferred->Branch(zero);
6154  deferred->BindExit();
6155  __ testq(map_result.reg(), map_result.reg());
6156  obj.Unuse();
6157  map_result.Unuse();
6158  temp1.Unuse();
6159  temp2.Unuse();
6160  destination()->Split(not_equal);
6161}
6162
6163
6164void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
6165  // This generates a fast version of:
6166  // (%_ClassOf(arg) === 'Function')
6167  ASSERT(args->length() == 1);
6168  Load(args->at(0));
6169  Result obj = frame_->Pop();
6170  obj.ToRegister();
6171  Condition is_smi = masm_->CheckSmi(obj.reg());
6172  destination()->false_target()->Branch(is_smi);
6173  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
6174  obj.Unuse();
6175  destination()->Split(equal);
6176}
6177
6178
6179void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
6180  ASSERT(args->length() == 1);
6181  Load(args->at(0));
6182  Result obj = frame_->Pop();
6183  obj.ToRegister();
6184  Condition is_smi = masm_->CheckSmi(obj.reg());
6185  destination()->false_target()->Branch(is_smi);
6186  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
6187  __ movzxbl(kScratchRegister,
6188             FieldOperand(kScratchRegister, Map::kBitFieldOffset));
6189  __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
6190  obj.Unuse();
6191  destination()->Split(not_zero);
6192}
6193
6194
6195void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
6196  ASSERT(args->length() == 0);
6197
6198  // Get the frame pointer for the calling frame.
6199  Result fp = allocator()->Allocate();
6200  __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6201
6202  // Skip the arguments adaptor frame if it exists.
6203  Label check_frame_marker;
6204  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
6205         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
6206  __ j(not_equal, &check_frame_marker);
6207  __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
6208
6209  // Check the marker in the calling frame.
6210  __ bind(&check_frame_marker);
6211  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
6212         Smi::FromInt(StackFrame::CONSTRUCT));
6213  fp.Unuse();
6214  destination()->Split(equal);
6215}
6216
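// Usage illustration: %_IsConstructCall() answers true inside 'new F()' and
// false for a plain call 'F()'; skipping the adaptor frame above keeps the
// answer correct when actual and formal argument counts differ.
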
6217
6218void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
6219  ASSERT(args->length() == 0);
6220
6221  Result fp = allocator_->Allocate();
6222  Result result = allocator_->Allocate();
6223  ASSERT(fp.is_valid() && result.is_valid());
6224
6225  Label exit;
6226
6227  // Get the number of formal parameters.
6228  __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
6229
6230  // Check if the calling frame is an arguments adaptor frame.
6231  __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6232  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
6233         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
6234  __ j(not_equal, &exit);
6235
6236  // Arguments adaptor case: Read the arguments length from the
6237  // adaptor frame.
6238  __ movq(result.reg(),
6239          Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
6240
6241  __ bind(&exit);
6242  result.set_type_info(TypeInfo::Smi());
6243  if (FLAG_debug_code) {
6244    __ AbortIfNotSmi(result.reg());
6245  }
6246  frame_->Push(&result);
6247}
6248
6249
6250void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
6251  ASSERT(args->length() == 1);
6252  JumpTarget leave, null, function, non_function_constructor;
6253  Load(args->at(0));  // Load the object.
6254  Result obj = frame_->Pop();
6255  obj.ToRegister();
6256  frame_->Spill(obj.reg());
6257
6258  // If the object is a smi, we return null.
6259  Condition is_smi = masm_->CheckSmi(obj.reg());
6260  null.Branch(is_smi);
6261
6262  // Check that the object is a JS object but take special care of JS
6263  // functions to make sure they have 'Function' as their class.
6264
6265  __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
6266  null.Branch(below);
6267
6268  // As long as JS_FUNCTION_TYPE is the last instance type and it is
6269  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
6270  // LAST_JS_OBJECT_TYPE.
6271  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
6272  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
6273  __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
6274  function.Branch(equal);
6275
6276  // Check if the constructor in the map is a function.
6277  __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
6278  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
6279  non_function_constructor.Branch(not_equal);
6280
6281  // The obj register now contains the constructor function. Grab the
6282  // instance class name from there.
6283  __ movq(obj.reg(),
6284          FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
6285  __ movq(obj.reg(),
6286          FieldOperand(obj.reg(),
6287                       SharedFunctionInfo::kInstanceClassNameOffset));
6288  frame_->Push(&obj);
6289  leave.Jump();
6290
6291  // Functions have class 'Function'.
6292  function.Bind();
6293  frame_->Push(FACTORY->function_class_symbol());
6294  leave.Jump();
6295
6296  // Objects with a non-function constructor have class 'Object'.
6297  non_function_constructor.Bind();
6298  frame_->Push(FACTORY->Object_symbol());
6299  leave.Jump();
6300
6301  // Non-JS objects have class null.
6302  null.Bind();
6303  frame_->Push(FACTORY->null_value());
6304
6305  // All done.
6306  leave.Bind();
6307}
6308
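// Illustration: %_ClassOf([]) yields 'Array' via the constructor's instance
// class name, %_ClassOf(function(){}) yields 'Function', and a smi argument
// takes the null path.
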
6309
6310void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
6311  ASSERT(args->length() == 1);
6312  JumpTarget leave;
6313  Load(args->at(0));  // Load the object.
6314  frame_->Dup();
6315  Result object = frame_->Pop();
6316  object.ToRegister();
6317  ASSERT(object.is_valid());
6318  // if (object->IsSmi()) return object.
6319  Condition is_smi = masm_->CheckSmi(object.reg());
6320  leave.Branch(is_smi);
6321  // It is a heap object - get map.
6322  Result temp = allocator()->Allocate();
6323  ASSERT(temp.is_valid());
6324  // if (!object->IsJSValue()) return object.
6325  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
6326  leave.Branch(not_equal);
6327  __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
6328  object.Unuse();
6329  frame_->SetElementAt(0, &temp);
6330  leave.Bind();
6331}
6332
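// Illustration: %_ValueOf(new Number(3)) unwraps the JSValue and yields 3,
// while smis and non-JSValue heap objects pass through unchanged.
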
6333
6334void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
6335  ASSERT(args->length() == 2);
6336  JumpTarget leave;
6337  Load(args->at(0));  // Load the object.
6338  Load(args->at(1));  // Load the value.
6339  Result value = frame_->Pop();
6340  Result object = frame_->Pop();
6341  value.ToRegister();
6342  object.ToRegister();
6343
6344  // if (object->IsSmi()) return value.
6345  Condition is_smi = masm_->CheckSmi(object.reg());
6346  leave.Branch(is_smi, &value);
6347
6348  // It is a heap object - get its map.
6349  Result scratch = allocator_->Allocate();
6350  ASSERT(scratch.is_valid());
6351  // if (!object->IsJSValue()) return value.
6352  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
6353  leave.Branch(not_equal, &value);
6354
6355  // Store the value.
6356  __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
6357  // Update the write barrier.  Save the value as it will be
6358  // overwritten by the write barrier code and is needed afterward.
6359  Result duplicate_value = allocator_->Allocate();
6360  ASSERT(duplicate_value.is_valid());
6361  __ movq(duplicate_value.reg(), value.reg());
6362  // The object register is also overwritten by the write barrier and
6363  // possibly aliased in the frame.
6364  frame_->Spill(object.reg());
6365  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
6366                 scratch.reg());
6367  object.Unuse();
6368  scratch.Unuse();
6369  duplicate_value.Unuse();
6370
6371  // Leave.
6372  leave.Bind(&value);
6373  frame_->Push(&value);
6374}
6375
6376
6377void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
6378  ASSERT(args->length() == 1);
6379
6380  // ArgumentsAccessStub expects the key in rdx and the formal
6381  // parameter count in rax.
6382  Load(args->at(0));
6383  Result key = frame_->Pop();
6384  // Explicitly create a constant result.
6385  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
6386  // Call the shared stub to get to arguments[key].
6387  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
6388  Result result = frame_->CallStub(&stub, &key, &count);
6389  frame_->Push(&result);
6390}
6391
6392
6393void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
6394  ASSERT(args->length() == 2);
6395
6396  // Load the two objects into registers and perform the comparison.
6397  Load(args->at(0));
6398  Load(args->at(1));
6399  Result right = frame_->Pop();
6400  Result left = frame_->Pop();
6401  right.ToRegister();
6402  left.ToRegister();
6403  __ cmpq(right.reg(), left.reg());
6404  right.Unuse();
6405  left.Unuse();
6406  destination()->Split(equal);
6407}
6408
6409
6410void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
6411  ASSERT(args->length() == 0);
6412  // The RBP value is aligned, so it carries a valid smi tag; it is not a
6413  // properly shifted (padded) smi, though, so it must not be treated as one.
6414  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
6415  Result rbp_as_smi = allocator_->Allocate();
6416  ASSERT(rbp_as_smi.is_valid());
6417  __ movq(rbp_as_smi.reg(), rbp);
6418  frame_->Push(&rbp_as_smi);
6419}
6420
6421
6422void CodeGenerator::GenerateRandomHeapNumber(
6423    ZoneList<Expression*>* args) {
6424  ASSERT(args->length() == 0);
6425  frame_->SpillAll();
6426
6427  Label slow_allocate_heapnumber;
6428  Label heapnumber_allocated;
6429  __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
6430  __ jmp(&heapnumber_allocated);
6431
6432  __ bind(&slow_allocate_heapnumber);
6433  // Allocate a heap number.
6434  __ CallRuntime(Runtime::kNumberAlloc, 0);
6435  __ movq(rbx, rax);
6436
6437  __ bind(&heapnumber_allocated);
6438
6439  // Return a random uint32 number in rax.
6440  // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
6441  __ PrepareCallCFunction(0);
6442  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
6443
6444  // Convert 32 random bits in rax to 0.(32 random bits) in a double
6445  // by computing:
6446  // (1.(20 zeros)(32 random bits) x 2^20) - (1.0 x 2^20).
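  // Worked check of the trick above, assuming IEEE-754 doubles: after the
  // xorpd below the bit pattern is 0x4130000000000000 | r for the 32 random
  // bits r, i.e. 2^20 * (1 + r/2^52) = 2^20 + r/2^32, so subtracting 2^20
  // leaves r/2^32, uniformly distributed in [0, 1).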
6447  __ movl(rcx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
6448  __ movd(xmm1, rcx);
6449  __ movd(xmm0, rax);
6450  __ cvtss2sd(xmm1, xmm1);
6451  __ xorpd(xmm0, xmm1);
6452  __ subsd(xmm0, xmm1);
6453  __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
6454
6455  __ movq(rax, rbx);
6456  Result result = allocator_->Allocate(rax);
6457  frame_->Push(&result);
6458}
6459
6460
6461void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
6462  ASSERT_EQ(2, args->length());
6463
6464  Load(args->at(0));
6465  Load(args->at(1));
6466
6467  StringAddStub stub(NO_STRING_ADD_FLAGS);
6468  Result answer = frame_->CallStub(&stub, 2);
6469  frame_->Push(&answer);
6470}
6471
6472
6473void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
6474  ASSERT_EQ(3, args->length());
6475
6476  Load(args->at(0));
6477  Load(args->at(1));
6478  Load(args->at(2));
6479
6480  SubStringStub stub;
6481  Result answer = frame_->CallStub(&stub, 3);
6482  frame_->Push(&answer);
6483}
6484
6485
6486void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
6487  ASSERT_EQ(2, args->length());
6488
6489  Load(args->at(0));
6490  Load(args->at(1));
6491
6492  StringCompareStub stub;
6493  Result answer = frame_->CallStub(&stub, 2);
6494  frame_->Push(&answer);
6495}
6496
6497
6498void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
6499  ASSERT_EQ(args->length(), 4);
6500
6501  // Load the arguments on the stack and call the runtime system.
6502  Load(args->at(0));
6503  Load(args->at(1));
6504  Load(args->at(2));
6505  Load(args->at(3));
6506  RegExpExecStub stub;
6507  Result result = frame_->CallStub(&stub, 4);
6508  frame_->Push(&result);
6509}
6510
6511
6512void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
6513  ASSERT_EQ(3, args->length());
6514  Load(args->at(0));  // Size of array, smi.
6515  Load(args->at(1));  // "index" property value.
6516  Load(args->at(2));  // "input" property value.
6517  RegExpConstructResultStub stub;
6518  Result result = frame_->CallStub(&stub, 3);
6519  frame_->Push(&result);
6520}
6521
6522
6523class DeferredSearchCache: public DeferredCode {
6524 public:
6525  DeferredSearchCache(Register dst,
6526                      Register cache,
6527                      Register key,
6528                      Register scratch)
6529      : dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
6530    set_comment("[ DeferredSearchCache");
6531  }
6532
6533  virtual void Generate();
6534
6535 private:
6536  Register dst_;    // On invocation holds the finger index (as int32); on
6537                    // exit holds the value that was looked up.
6538  Register cache_;  // instance of JSFunctionResultCache.
6539  Register key_;    // key being looked up.
6540  Register scratch_;
6541};
6542
6543
6544// Returns the operand of the element at |index| + |additional_offset| in the
6545// FixedArray whose pointer is held in |array|.  |index| is an int32.
6546static Operand ArrayElement(Register array,
6547                            Register index,
6548                            int additional_offset = 0) {
6549  int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
6550  return FieldOperand(array, index, times_pointer_size, offset);
6551}
6552
6553
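// Layout sketch of the JSFunctionResultCache, as assumed by the search code
// below: the backing FixedArray holds a factory function, a finger index and
// the current cache size in fixed header slots, followed by (key, value)
// pairs of kEntrySize starting at kEntriesIndex; the finger points at the
// key of the most recently hit entry.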
6554void DeferredSearchCache::Generate() {
6555  Label first_loop, search_further, second_loop, cache_miss;
6556
6557  Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
6558  Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
6559
6560  // Check the cache from finger to start of the cache.
6561  __ bind(&first_loop);
6562  __ subl(dst_, kEntrySizeImm);
6563  __ cmpl(dst_, kEntriesIndexImm);
6564  __ j(less, &search_further);
6565
6566  __ cmpq(ArrayElement(cache_, dst_), key_);
6567  __ j(not_equal, &first_loop);
6568
6569  __ Integer32ToSmiField(
6570      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
6571  __ movq(dst_, ArrayElement(cache_, dst_, 1));
6572  __ jmp(exit_label());
6573
6574  __ bind(&search_further);
6575
6576  // Check the cache from end of cache up to finger.
6577  __ SmiToInteger32(dst_,
6578                    FieldOperand(cache_,
6579                                 JSFunctionResultCache::kCacheSizeOffset));
6580  __ SmiToInteger32(scratch_,
6581                    FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
6582
6583  __ bind(&second_loop);
6584  __ subl(dst_, kEntrySizeImm);
6585  __ cmpl(dst_, scratch_);
6586  __ j(less_equal, &cache_miss);
6587
6588  __ cmpq(ArrayElement(cache_, dst_), key_);
6589  __ j(not_equal, &second_loop);
6590
6591  __ Integer32ToSmiField(
6592      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
6593  __ movq(dst_, ArrayElement(cache_, dst_, 1));
6594  __ jmp(exit_label());
6595
6596  __ bind(&cache_miss);
6597  __ push(cache_);  // store a reference to cache
6598  __ push(key_);  // store a key
6599  __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
6600  __ push(key_);
6601  // On x64 the function must be in rdi.
6602  __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
6603  ParameterCount expected(1);
6604  __ InvokeFunction(rdi, expected, CALL_FUNCTION);
6605
6606  // Find a place to put new cached value into.
6607  Label add_new_entry, update_cache;
6608  __ movq(rcx, Operand(rsp, kPointerSize));  // restore the cache
6609  // Possible optimization: the cache size is constant for a given cache,
6610  // so technically we could use a constant here.  However, on a cache
6611  // miss this optimization would hardly matter.
6612
6613  // Check if we could add new entry to cache.
6614  __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
6615  __ SmiToInteger32(r9,
6616                    FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
6617  __ cmpl(rbx, r9);
6618  __ j(greater, &add_new_entry);
6619
6620  // Check if we could evict entry after finger.
6621  __ SmiToInteger32(rdx,
6622                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
6623  __ addl(rdx, kEntrySizeImm);
6624  Label forward;
6625  __ cmpl(rbx, rdx);
6626  __ j(greater, &forward);
6627  // Need to wrap over the cache.
6628  __ movl(rdx, kEntriesIndexImm);
6629  __ bind(&forward);
6630  __ movl(r9, rdx);
6631  __ jmp(&update_cache);
6632
6633  __ bind(&add_new_entry);
6634  // r9 holds cache size as int32.
6635  __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
6636  __ Integer32ToSmiField(
6637      FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
6638
6639  // Update the cache itself.
6640  // r9 holds the index as int32.
6641  __ bind(&update_cache);
6642  __ pop(rbx);  // restore the key
6643  __ Integer32ToSmiField(
6644      FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
6645  // Store key.
6646  __ movq(ArrayElement(rcx, r9), rbx);
6647  __ RecordWrite(rcx, 0, rbx, r9);
6648
6649  // Store value.
6650  __ pop(rcx);  // restore the cache.
6651  __ SmiToInteger32(rdx,
6652                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
6653  __ incl(rdx);
6654  // Backup rax, because the RecordWrite macro clobbers its arguments.
6655  __ movq(rbx, rax);
6656  __ movq(ArrayElement(rcx, rdx), rax);
6657  __ RecordWrite(rcx, 0, rbx, rdx);
6658
6659  if (!dst_.is(rax)) {
6660    __ movq(dst_, rax);
6661  }
6662}
6663
6664
6665void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
6666  ASSERT_EQ(2, args->length());
6667
6668  ASSERT_NE(NULL, args->at(0)->AsLiteral());
6669  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
6670
6671  Handle<FixedArray> jsfunction_result_caches(
6672      Isolate::Current()->global_context()->jsfunction_result_caches());
6673  if (jsfunction_result_caches->length() <= cache_id) {
6674    __ Abort("Attempt to use undefined cache.");
6675    frame_->Push(FACTORY->undefined_value());
6676    return;
6677  }
6678
6679  Load(args->at(1));
6680  Result key = frame_->Pop();
6681  key.ToRegister();
6682
6683  Result cache = allocator()->Allocate();
6684  ASSERT(cache.is_valid());
6685  __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
6686  __ movq(cache.reg(),
6687          FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
6688  __ movq(cache.reg(),
6689          ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
6690  __ movq(cache.reg(),
6691          FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
6692
6693  Result tmp = allocator()->Allocate();
6694  ASSERT(tmp.is_valid());
6695
6696  Result scratch = allocator()->Allocate();
6697  ASSERT(scratch.is_valid());
6698
6699  DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
6700                                                          cache.reg(),
6701                                                          key.reg(),
6702                                                          scratch.reg());
6703
6704  const int kFingerOffset =
6705      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
6706  // Load the cache finger into tmp.reg(), converting it from smi to int32.
6707  __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
6708  __ cmpq(key.reg(), FieldOperand(cache.reg(),
6709                                  tmp.reg(), times_pointer_size,
6710                                  FixedArray::kHeaderSize));
6711  deferred->Branch(not_equal);
6712  __ movq(tmp.reg(), FieldOperand(cache.reg(),
6713                                  tmp.reg(), times_pointer_size,
6714                                  FixedArray::kHeaderSize + kPointerSize));
6715
6716  deferred->BindExit();
6717  frame_->Push(&tmp);
6718}
6719
6720
6721void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
6722  ASSERT_EQ(args->length(), 1);
6723
6724  // Load the argument on the stack and jump to the runtime.
6725  Load(args->at(0));
6726
6727  NumberToStringStub stub;
6728  Result result = frame_->CallStub(&stub, 1);
6729  frame_->Push(&result);
6730}
6731
6732
6733class DeferredSwapElements: public DeferredCode {
6734 public:
6735  DeferredSwapElements(Register object, Register index1, Register index2)
6736      : object_(object), index1_(index1), index2_(index2) {
6737    set_comment("[ DeferredSwapElements");
6738  }
6739
6740  virtual void Generate();
6741
6742 private:
6743  Register object_, index1_, index2_;
6744};
6745
6746
6747void DeferredSwapElements::Generate() {
6748  __ push(object_);
6749  __ push(index1_);
6750  __ push(index2_);
6751  __ CallRuntime(Runtime::kSwapElements, 3);
6752}
6753
6754
6755void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
6756  Comment cmnt(masm_, "[ GenerateSwapElements");
6757
6758  ASSERT_EQ(3, args->length());
6759
6760  Load(args->at(0));
6761  Load(args->at(1));
6762  Load(args->at(2));
6763
6764  Result index2 = frame_->Pop();
6765  index2.ToRegister();
6766
6767  Result index1 = frame_->Pop();
6768  index1.ToRegister();
6769
6770  Result object = frame_->Pop();
6771  object.ToRegister();
6772
6773  Result tmp1 = allocator()->Allocate();
6774  tmp1.ToRegister();
6775  Result tmp2 = allocator()->Allocate();
6776  tmp2.ToRegister();
6777
6778  frame_->Spill(object.reg());
6779  frame_->Spill(index1.reg());
6780  frame_->Spill(index2.reg());
6781
6782  DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
6783                                                            index1.reg(),
6784                                                            index2.reg());
6785
6786  // Fetch the map and check if array is in fast case.
6787  // Check that object doesn't require security checks and
6788  // has no indexed interceptor.
6789  __ CmpObjectType(object.reg(), JS_ARRAY_TYPE, tmp1.reg());
6790  deferred->Branch(not_equal);
6791  __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
6792           Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
6793  deferred->Branch(not_zero);
6794
6795  // Check the object's elements are in fast case and writable.
6796  __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
6797  __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
6798                 Heap::kFixedArrayMapRootIndex);
6799  deferred->Branch(not_equal);
6800
6801  // Check that both indices are smis.
6802  Condition both_smi = masm()->CheckBothSmi(index1.reg(), index2.reg());
6803  deferred->Branch(NegateCondition(both_smi));
6804
6805  // Check that both indices are valid.
6806  __ movq(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
6807  __ SmiCompare(tmp2.reg(), index1.reg());
6808  deferred->Branch(below_equal);
6809  __ SmiCompare(tmp2.reg(), index2.reg());
6810  deferred->Branch(below_equal);
6811
6812  // Bring addresses into index1 and index2.
6813  __ SmiToInteger32(index1.reg(), index1.reg());
6814  __ lea(index1.reg(), FieldOperand(tmp1.reg(),
6815                                    index1.reg(),
6816                                    times_pointer_size,
6817                                    FixedArray::kHeaderSize));
6818  __ SmiToInteger32(index2.reg(), index2.reg());
6819  __ lea(index2.reg(), FieldOperand(tmp1.reg(),
6820                                    index2.reg(),
6821                                    times_pointer_size,
6822                                    FixedArray::kHeaderSize));
6823
6824  // Swap elements.
6825  __ movq(object.reg(), Operand(index1.reg(), 0));
6826  __ movq(tmp2.reg(), Operand(index2.reg(), 0));
6827  __ movq(Operand(index2.reg(), 0), object.reg());
6828  __ movq(Operand(index1.reg(), 0), tmp2.reg());
6829
6830  Label done;
6831  __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
6832  // Possible optimization: check that both values are smis
6833  // (OR them together and test against the smi tag mask).
6834
6835  __ movq(tmp2.reg(), tmp1.reg());
6836  __ RecordWriteHelper(tmp1.reg(), index1.reg(), object.reg());
6837  __ RecordWriteHelper(tmp2.reg(), index2.reg(), object.reg());
6838  __ bind(&done);
6839
6840  deferred->BindExit();
6841  frame_->Push(FACTORY->undefined_value());
6842}
6843
6844
6845void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
6846  Comment cmnt(masm_, "[ GenerateCallFunction");
6847
6848  ASSERT(args->length() >= 2);
6849
6850  int n_args = args->length() - 2;  // for receiver and function.
6851  Load(args->at(0));  // receiver
6852  for (int i = 0; i < n_args; i++) {
6853    Load(args->at(i + 1));
6854  }
6855  Load(args->at(n_args + 1));  // function
6856  Result result = frame_->CallJSFunction(n_args);
6857  frame_->Push(&result);
6858}
6859
6860
6861// Generates the Math.pow method. Only handles special cases and
6862// branches to the runtime system for everything else. Please note
6863// that this function assumes that the callsite has executed ToNumber
6864// on both arguments.
6865void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
6866  ASSERT(args->length() == 2);
6867  Load(args->at(0));
6868  Load(args->at(1));
6869
6870  Label allocate_return;
6871  // Load the two operands while leaving the values on the frame.
6872  frame()->Dup();
6873  Result exponent = frame()->Pop();
6874  exponent.ToRegister();
6875  frame()->Spill(exponent.reg());
6876  frame()->PushElementAt(1);
6877  Result base = frame()->Pop();
6878  base.ToRegister();
6879  frame()->Spill(base.reg());
6880
6881  Result answer = allocator()->Allocate();
6882  ASSERT(answer.is_valid());
6883  ASSERT(!exponent.reg().is(base.reg()));
6884  JumpTarget call_runtime;
6885
6886  // Save 1 in xmm3 - we need this several times later on.
6887  __ movl(answer.reg(), Immediate(1));
6888  __ cvtlsi2sd(xmm3, answer.reg());
6889
6890  Label exponent_nonsmi;
6891  Label base_nonsmi;
6892  // If the exponent is a heap number go to that specific case.
6893  __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
6894  __ JumpIfNotSmi(base.reg(), &base_nonsmi);
6895
6896  // Optimized version when y is an integer.
6897  Label powi;
6898  __ SmiToInteger32(base.reg(), base.reg());
6899  __ cvtlsi2sd(xmm0, base.reg());
6900  __ jmp(&powi);
6901  // The exponent is a smi and the base is a heap number.
6902  __ bind(&base_nonsmi);
6903  __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
6904                 Heap::kHeapNumberMapRootIndex);
6905  call_runtime.Branch(not_equal);
6906
6907  __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
6908
6909  // Optimized version of pow if y is an integer.
6910  __ bind(&powi);
6911  __ SmiToInteger32(exponent.reg(), exponent.reg());
6912
6913  // Save exponent in base as we need to check if exponent is negative later.
6914  // We know that base and exponent are in different registers.
6915  __ movl(base.reg(), exponent.reg());
6916
6917  // Get absolute value of exponent.
6918  Label no_neg;
6919  __ cmpl(exponent.reg(), Immediate(0));
6920  __ j(greater_equal, &no_neg);
6921  __ negl(exponent.reg());
6922  __ bind(&no_neg);
6923
6924  // Load xmm1 with 1.
6925  __ movsd(xmm1, xmm3);
6926  Label while_true;
6927  Label no_multiply;
6928
6929  __ bind(&while_true);
6930  __ shrl(exponent.reg(), Immediate(1));
6931  __ j(not_carry, &no_multiply);
6932  __ mulsd(xmm1, xmm0);
6933  __ bind(&no_multiply);
6934  __ testl(exponent.reg(), exponent.reg());
6935  __ mulsd(xmm0, xmm0);
6936  __ j(not_zero, &while_true);
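  // Note: the loop above is binary exponentiation - each iteration shifts
  // the low bit out of the exponent and multiplies the accumulator xmm1 by
  // the running power xmm0 = x^(2^i) whenever that bit was set. E.g. for
  // |exponent| = 13 (0b1101) the result is x^1 * x^4 * x^8.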
6937
6938  // base.reg() holds the original exponent - if negative, return 1/result.
6939  __ testl(base.reg(), base.reg());
6940  __ j(positive, &allocate_return);
6941  // Special case if xmm1 has reached infinity.
6942  __ movl(answer.reg(), Immediate(0x7FB00000));
6943  __ movd(xmm0, answer.reg());
6944  __ cvtss2sd(xmm0, xmm0);
6945  __ ucomisd(xmm0, xmm1);
6946  call_runtime.Branch(equal);
6947  __ divsd(xmm3, xmm1);
6948  __ movsd(xmm1, xmm3);
6949  __ jmp(&allocate_return);
6950
6951  // The exponent (or both operands) is a heap number - either way we now
6952  // work on doubles.
6953  __ bind(&exponent_nonsmi);
6954  __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
6955                 Heap::kHeapNumberMapRootIndex);
6956  call_runtime.Branch(not_equal);
6957  __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
6958  // Test if exponent is nan.
6959  __ ucomisd(xmm1, xmm1);
6960  call_runtime.Branch(parity_even);
6961
6962  Label base_not_smi;
6963  Label handle_special_cases;
6964  __ JumpIfNotSmi(base.reg(), &base_not_smi);
6965  __ SmiToInteger32(base.reg(), base.reg());
6966  __ cvtlsi2sd(xmm0, base.reg());
6967  __ jmp(&handle_special_cases);
6968  __ bind(&base_not_smi);
6969  __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
6970                 Heap::kHeapNumberMapRootIndex);
6971  call_runtime.Branch(not_equal);
6972  __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
6973  __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
6974  __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
6975  // base is NaN or +/-Infinity
6976  call_runtime.Branch(greater_equal);
6977  __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
6978
6979  // base is in xmm0 and exponent is in xmm1.
6980  __ bind(&handle_special_cases);
6981  Label not_minus_half;
6982  // Test for -0.5.
6983  // Load xmm2 with -0.5.
6984  __ movl(answer.reg(), Immediate(0xBF000000));
6985  __ movd(xmm2, answer.reg());
6986  __ cvtss2sd(xmm2, xmm2);
6987  // xmm2 now has -0.5.
6988  __ ucomisd(xmm2, xmm1);
6989  __ j(not_equal, &not_minus_half);
6990
6991  // Calculates reciprocal of square root.
6992  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
6993  __ xorpd(xmm1, xmm1);
6994  __ addsd(xmm1, xmm0);
6995  __ sqrtsd(xmm1, xmm1);
6996  __ divsd(xmm3, xmm1);
6997  __ movsd(xmm1, xmm3);
6998  __ jmp(&allocate_return);
6999
7000  // Test for 0.5.
7001  __ bind(&not_minus_half);
7002  // Load xmm2 with 0.5.
7003  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
7004  __ addsd(xmm2, xmm3);
7005  // xmm2 now has 0.5.
7006  __ ucomisd(xmm2, xmm1);
7007  call_runtime.Branch(not_equal);
7008
7009  // Calculates square root.
7010  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
7011  __ xorpd(xmm1, xmm1);
7012  __ addsd(xmm1, xmm0);
7013  __ sqrtsd(xmm1, xmm1);
7014
7015  JumpTarget done;
7016  Label failure, success;
7017  __ bind(&allocate_return);
7018  // Make a copy of the frame to enable us to handle allocation
7019  // failure after the JumpTarget jump.
7020  VirtualFrame* clone = new VirtualFrame(frame());
7021  __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
7022  __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
7023  // Remove the two original values from the frame - we only need those
7024  // in the case where we branch to runtime.
7025  frame()->Drop(2);
7026  exponent.Unuse();
7027  base.Unuse();
7028  done.Jump(&answer);
7029  // Use the copy of the original frame as our current frame.
7030  RegisterFile empty_regs;
7031  SetFrame(clone, &empty_regs);
7032  // If we experience an allocation failure we branch to runtime.
7033  __ bind(&failure);
7034  call_runtime.Bind();
7035  answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
7036
7037  done.Bind(&answer);
7038  frame()->Push(&answer);
7039}
7040
7041
7042void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
7043  ASSERT_EQ(args->length(), 1);
7044  Load(args->at(0));
7045  TranscendentalCacheStub stub(TranscendentalCache::SIN,
7046                               TranscendentalCacheStub::TAGGED);
7047  Result result = frame_->CallStub(&stub, 1);
7048  frame_->Push(&result);
7049}
7050
7051
7052void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
7053  ASSERT_EQ(args->length(), 1);
7054  Load(args->at(0));
7055  TranscendentalCacheStub stub(TranscendentalCache::COS,
7056                               TranscendentalCacheStub::TAGGED);
7057  Result result = frame_->CallStub(&stub, 1);
7058  frame_->Push(&result);
7059}
7060
7061
7062void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
7063  ASSERT_EQ(args->length(), 1);
7064  Load(args->at(0));
7065  TranscendentalCacheStub stub(TranscendentalCache::LOG,
7066                               TranscendentalCacheStub::TAGGED);
7067  Result result = frame_->CallStub(&stub, 1);
7068  frame_->Push(&result);
7069}
7070
7071
7072// Generates the Math.sqrt method. Please note - this function assumes that
7073// the callsite has executed ToNumber on the argument.
7074void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
7075  ASSERT(args->length() == 1);
7076  Load(args->at(0));
7077
7078  // Leave original value on the frame if we need to call runtime.
7079  frame()->Dup();
7080  Result result = frame()->Pop();
7081  result.ToRegister();
7082  frame()->Spill(result.reg());
7083  Label runtime;
7084  Label non_smi;
7085  Label load_done;
7086  JumpTarget end;
7087
7088  __ JumpIfNotSmi(result.reg(), &non_smi);
7089  __ SmiToInteger32(result.reg(), result.reg());
7090  __ cvtlsi2sd(xmm0, result.reg());
7091  __ jmp(&load_done);
7092  __ bind(&non_smi);
7093  __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
7094                 Heap::kHeapNumberMapRootIndex);
7095  __ j(not_equal, &runtime);
7096  __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
7097
7098  __ bind(&load_done);
7099  __ sqrtsd(xmm0, xmm0);
7100  // A copy of the virtual frame to allow us to go to runtime after the
7101  // JumpTarget jump.
7102  Result scratch = allocator()->Allocate();
7103  VirtualFrame* clone = new VirtualFrame(frame());
7104  __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
7105
7106  __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
7107  frame()->Drop(1);
7108  scratch.Unuse();
7109  end.Jump(&result);
7110  // We only branch to runtime if we have an allocation error.
7111  // Use the copy of the original frame as our current frame.
7112  RegisterFile empty_regs;
7113  SetFrame(clone, &empty_regs);
7114  __ bind(&runtime);
7115  result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
7116
7117  end.Bind(&result);
7118  frame()->Push(&result);
7119}
7120
7121
7122void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
7123  ASSERT_EQ(2, args->length());
7124  Load(args->at(0));
7125  Load(args->at(1));
7126  Result right_res = frame_->Pop();
7127  Result left_res = frame_->Pop();
7128  right_res.ToRegister();
7129  left_res.ToRegister();
7130  Result tmp_res = allocator()->Allocate();
7131  ASSERT(tmp_res.is_valid());
7132  Register right = right_res.reg();
7133  Register left = left_res.reg();
7134  Register tmp = tmp_res.reg();
7135  right_res.Unuse();
7136  left_res.Unuse();
7137  tmp_res.Unuse();
7138  __ cmpq(left, right);
7139  destination()->true_target()->Branch(equal);
7140  // Fail if either is a non-HeapObject.
7141  Condition either_smi =
7142      masm()->CheckEitherSmi(left, right, tmp);
7143  destination()->false_target()->Branch(either_smi);
7144  __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
7145  __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
7146          Immediate(JS_REGEXP_TYPE));
7147  destination()->false_target()->Branch(not_equal);
7148  __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
7149  destination()->false_target()->Branch(not_equal);
7150  __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
7151  __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
7152  destination()->Split(equal);
7153}
7154
7155
7156void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
7157  ASSERT(args->length() == 1);
7158  Load(args->at(0));
7159  Result value = frame_->Pop();
7160  value.ToRegister();
7161  ASSERT(value.is_valid());
7162  __ testl(FieldOperand(value.reg(), String::kHashFieldOffset),
7163           Immediate(String::kContainsCachedArrayIndexMask));
7164  value.Unuse();
7165  destination()->Split(zero);
7166}
7167
7168
7169void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
7170  ASSERT(args->length() == 1);
7171  Load(args->at(0));
7172  Result string = frame_->Pop();
7173  string.ToRegister();
7174
7175  Result number = allocator()->Allocate();
7176  ASSERT(number.is_valid());
7177  __ movl(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
7178  __ IndexFromHash(number.reg(), number.reg());
7179  string.Unuse();
7180  frame_->Push(&number);
7181}
7182
7183
7184void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
7185  frame_->Push(FACTORY->undefined_value());
7186}
7187
7188
7189void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
7190  if (CheckForInlineRuntimeCall(node)) {
7191    return;
7192  }
7193
7194  ZoneList<Expression*>* args = node->arguments();
7195  Comment cmnt(masm_, "[ CallRuntime");
7196  const Runtime::Function* function = node->function();
7197
7198  if (function == NULL) {
7199    // Push the builtins object found in the current global object.
7200    Result temp = allocator()->Allocate();
7201    ASSERT(temp.is_valid());
7202    __ movq(temp.reg(), GlobalObjectOperand());
7203    __ movq(temp.reg(),
7204            FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
7205    frame_->Push(&temp);
7206  }
7207
7208  // Push the arguments ("left-to-right").
7209  int arg_count = args->length();
7210  for (int i = 0; i < arg_count; i++) {
7211    Load(args->at(i));
7212  }
7213
7214  if (function == NULL) {
7215    // Call the JS runtime function.
7216    frame_->Push(node->name());
7217    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
7218                                       arg_count,
7219                                       loop_nesting_);
7220    frame_->RestoreContextRegister();
7221    frame_->Push(&answer);
7222  } else {
7223    // Call the C runtime function.
7224    Result answer = frame_->CallRuntime(function, arg_count);
7225    frame_->Push(&answer);
7226  }
7227}
7228
7229
7230void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
7231  Comment cmnt(masm_, "[ UnaryOperation");
7232
7233  Token::Value op = node->op();
7234
7235  if (op == Token::NOT) {
7236    // Swap the true and false targets but keep the same actual label
7237    // as the fall through.
7238    destination()->Invert();
7239    LoadCondition(node->expression(), destination(), true);
7240    // Swap the labels back.
7241    destination()->Invert();
7242
7243  } else if (op == Token::DELETE) {
7244    Property* property = node->expression()->AsProperty();
7245    if (property != NULL) {
7246      Load(property->obj());
7247      Load(property->key());
7248      frame_->Push(Smi::FromInt(strict_mode_flag()));
7249      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
7250      frame_->Push(&answer);
7251      return;
7252    }
7253
7254    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
7255    if (variable != NULL) {
7256      // Delete of an unqualified identifier is disallowed in strict mode,
7257      // but "delete this" is allowed.
7258      ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
7259      Slot* slot = variable->AsSlot();
7260      if (variable->is_global()) {
7261        LoadGlobal();
7262        frame_->Push(variable->name());
7263        frame_->Push(Smi::FromInt(kNonStrictMode));
7264        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
7265                                              CALL_FUNCTION, 3);
7266        frame_->Push(&answer);
7267
7268      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
7269        // Call the runtime to delete from the context holding the named
7270        // variable.  Sync the virtual frame eagerly so we can push the
7271        // arguments directly into place.
7272        frame_->SyncRange(0, frame_->element_count() - 1);
7273        frame_->EmitPush(rsi);
7274        frame_->EmitPush(variable->name());
7275        Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
7276        frame_->Push(&answer);
7277      } else {
7278        // Default: deleting non-global variables that were not
7279        // dynamically introduced yields false.
7280        frame_->Push(FACTORY->false_value());
7281      }
7282    } else {
7283      // Default: Result of deleting expressions is true.
7284      Load(node->expression());  // may have side-effects
7285      frame_->SetElementAt(0, FACTORY->true_value());
7286    }
7287
7288  } else if (op == Token::TYPEOF) {
7289    // Special case for loading the typeof expression; see comment on
7290    // LoadTypeofExpression().
7291    LoadTypeofExpression(node->expression());
7292    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
7293    frame_->Push(&answer);
7294
7295  } else if (op == Token::VOID) {
7296    Expression* expression = node->expression();
7297    if (expression && expression->AsLiteral() && (
7298        expression->AsLiteral()->IsTrue() ||
7299        expression->AsLiteral()->IsFalse() ||
7300        expression->AsLiteral()->handle()->IsNumber() ||
7301        expression->AsLiteral()->handle()->IsString() ||
7302        expression->AsLiteral()->handle()->IsJSRegExp() ||
7303        expression->AsLiteral()->IsNull())) {
7304      // Omit evaluating the value of the primitive literal.
7305      // It will be discarded anyway, and can have no side effect.
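      // E.g. 'void 0' compiles to a single push of undefined.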
7306      frame_->Push(FACTORY->undefined_value());
7307    } else {
7308      Load(node->expression());
7309      frame_->SetElementAt(0, FACTORY->undefined_value());
7310    }
7311
7312  } else {
7313    bool can_overwrite = node->expression()->ResultOverwriteAllowed();
7314    UnaryOverwriteMode overwrite =
7315        can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
7316    bool no_negative_zero = node->expression()->no_negative_zero();
7317    Load(node->expression());
7318    switch (op) {
7319      case Token::NOT:
7320      case Token::DELETE:
7321      case Token::TYPEOF:
7322        UNREACHABLE();  // handled above
7323        break;
7324
7325      case Token::SUB: {
7326        GenericUnaryOpStub stub(
7327            Token::SUB,
7328            overwrite,
7329            NO_UNARY_FLAGS,
7330            no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
7331        Result operand = frame_->Pop();
7332        Result answer = frame_->CallStub(&stub, &operand);
7333        answer.set_type_info(TypeInfo::Number());
7334        frame_->Push(&answer);
7335        break;
7336      }
7337
7338      case Token::BIT_NOT: {
7339        // Smi check.
7340        JumpTarget smi_label;
7341        JumpTarget continue_label;
7342        Result operand = frame_->Pop();
7343        operand.ToRegister();
7344
7345        Condition is_smi = masm_->CheckSmi(operand.reg());
7346        smi_label.Branch(is_smi, &operand);
7347
7348        GenericUnaryOpStub stub(Token::BIT_NOT,
7349                                overwrite,
7350                                NO_UNARY_SMI_CODE_IN_STUB);
7351        Result answer = frame_->CallStub(&stub, &operand);
7352        continue_label.Jump(&answer);
7353
7354        smi_label.Bind(&answer);
7355        answer.ToRegister();
7356        frame_->Spill(answer.reg());
7357        __ SmiNot(answer.reg(), answer.reg());
7358        continue_label.Bind(&answer);
7359        answer.set_type_info(TypeInfo::Smi());
7360        frame_->Push(&answer);
7361        break;
7362      }
7363
7364      case Token::ADD: {
7365        // Smi check.
7366        JumpTarget continue_label;
7367        Result operand = frame_->Pop();
7368        TypeInfo operand_info = operand.type_info();
7369        operand.ToRegister();
7370        Condition is_smi = masm_->CheckSmi(operand.reg());
7371        continue_label.Branch(is_smi, &operand);
7372        frame_->Push(&operand);
7373        Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
7374                                              CALL_FUNCTION, 1);
7375
7376        continue_label.Bind(&answer);
7377        if (operand_info.IsSmi()) {
7378          answer.set_type_info(TypeInfo::Smi());
7379        } else if (operand_info.IsInteger32()) {
7380          answer.set_type_info(TypeInfo::Integer32());
7381        } else {
7382          answer.set_type_info(TypeInfo::Number());
7383        }
7384        frame_->Push(&answer);
7385        break;
7386      }
7387      default:
7388        UNREACHABLE();
7389    }
7390  }
7391}
7392
7393
7394// The value in dst was optimistically incremented or decremented.
7395// The result overflowed or was not smi tagged.  Call into the runtime
7396// to convert the argument to a number, and call the specialized add
7397// or subtract stub.  The result is left in dst.
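// For example, in '++x' where x holds a string, the optimistic smi
// add fails; this code converts x with ToNumber and redoes the
// increment with the generic stub.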
7398class DeferredPrefixCountOperation: public DeferredCode {
7399 public:
7400  DeferredPrefixCountOperation(Register dst,
7401                               bool is_increment,
7402                               TypeInfo input_type)
7403      : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
7404    set_comment("[ DeferredPrefixCountOperation");
7405  }
7406
7407  virtual void Generate();
7408
7409 private:
7410  Register dst_;
7411  bool is_increment_;
7412  TypeInfo input_type_;
7413};
7414
7415
7416void DeferredPrefixCountOperation::Generate() {
7417  Register left;
7418  if (input_type_.IsNumber()) {
7419    left = dst_;
7420  } else {
7421    __ push(dst_);
7422    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
7423    left = rax;
7424  }
7425
7426  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
7427                           NO_OVERWRITE,
7428                           NO_GENERIC_BINARY_FLAGS,
7429                           TypeInfo::Number());
7430  stub.GenerateCall(masm_, left, Smi::FromInt(1));
7431
7432  if (!dst_.is(rax)) __ movq(dst_, rax);
7433}
7434
7435
7436// The value in dst was optimistically incremented or decremented.
7437// The result overflowed or was not smi tagged.  Call into the runtime
7438// to convert the argument to a number.  Update the original value in
7439// old.  Call the specialized add or subtract stub.  The result is
7440// left in dst.
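// For example, in 'x++' where x is not a number, ToNumber(x) is both
// the expression's result (saved in old) and the input to the add.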
7441class DeferredPostfixCountOperation: public DeferredCode {
7442 public:
7443  DeferredPostfixCountOperation(Register dst,
7444                                Register old,
7445                                bool is_increment,
7446                                TypeInfo input_type)
7447      : dst_(dst),
7448        old_(old),
7449        is_increment_(is_increment),
7450        input_type_(input_type) {
7451    set_comment("[ DeferredPostfixCountOperation");
7452  }
7453
7454  virtual void Generate();
7455
7456 private:
7457  Register dst_;
7458  Register old_;
7459  bool is_increment_;
7460  TypeInfo input_type_;
7461};
7462
7463
7464void DeferredPostfixCountOperation::Generate() {
7465  Register left;
7466  if (input_type_.IsNumber()) {
7467    __ push(dst_);  // Save the input to use as the old value.
7468    left = dst_;
7469  } else {
7470    __ push(dst_);
7471    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
7472    __ push(rax);  // Save the result of ToNumber to use as the old value.
7473    left = rax;
7474  }
7475
7476  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
7477                           NO_OVERWRITE,
7478                           NO_GENERIC_BINARY_FLAGS,
7479                           TypeInfo::Number());
7480  stub.GenerateCall(masm_, left, Smi::FromInt(1));
7481
7482  if (!dst_.is(rax)) __ movq(dst_, rax);
7483  __ pop(old_);
7484}
7485
7486
7487void CodeGenerator::VisitCountOperation(CountOperation* node) {
7488  Comment cmnt(masm_, "[ CountOperation");
7489
7490  bool is_postfix = node->is_postfix();
7491  bool is_increment = node->op() == Token::INC;
7492
7493  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
7494  bool is_const = (var != NULL && var->mode() == Variable::CONST);
7495
7496  // Postfix operations need a stack slot under the reference to hold
7497  // the old value while the new value is being stored.  This is so that
7498  // in the case that storing the new value requires a call, the old
7499  // value will be in the frame to be spilled.
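  // For example, in 'a[f()]++' storing the new value can call a keyed
  // store IC; the old value survives that call in this extra slot.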
7500  if (is_postfix) frame_->Push(Smi::FromInt(0));
7501
7502  // A constant reference is never stored to, so do not treat it as a
7503  // compound assignment reference.
7504  { Reference target(this, node->expression(), !is_const);
7505    if (target.is_illegal()) {
7506      // Spoof the virtual frame to have the expected height (one higher
7507      // than on entry).
7508      if (!is_postfix) frame_->Push(Smi::FromInt(0));
7509      return;
7510    }
7511    target.TakeValue();
7512
7513    Result new_value = frame_->Pop();
7514    new_value.ToRegister();
7515
7516    Result old_value;  // Only allocated in the postfix case.
7517    if (is_postfix) {
7518      // Allocate a temporary to preserve the old value.
7519      old_value = allocator_->Allocate();
7520      ASSERT(old_value.is_valid());
7521      __ movq(old_value.reg(), new_value.reg());
7522
7523      // The return value for postfix operations is ToNumber(input).
7524      // Keep more precise type info if the input is some kind of
7525      // number already. If the input is not a number we have to wait
7526      // for the deferred code to convert it.
7527      if (new_value.type_info().IsNumber()) {
7528        old_value.set_type_info(new_value.type_info());
7529      }
7530    }
7531    // Ensure the new value is writable.
7532    frame_->Spill(new_value.reg());
7533
7534    DeferredCode* deferred = NULL;
7535    if (is_postfix) {
7536      deferred = new DeferredPostfixCountOperation(new_value.reg(),
7537                                                   old_value.reg(),
7538                                                   is_increment,
7539                                                   new_value.type_info());
7540    } else {
7541      deferred = new DeferredPrefixCountOperation(new_value.reg(),
7542                                                  is_increment,
7543                                                  new_value.type_info());
7544    }
7545
7546    if (new_value.is_smi()) {
7547      if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
7548    } else {
7549      __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
7550    }
7551    if (is_increment) {
7552      __ SmiAddConstant(new_value.reg(),
7553                        new_value.reg(),
7554                        Smi::FromInt(1),
7555                        deferred->entry_label());
7556    } else {
7557      __ SmiSubConstant(new_value.reg(),
7558                        new_value.reg(),
7559                        Smi::FromInt(1),
7560                        deferred->entry_label());
7561    }
7562    deferred->BindExit();
7563
7564    // Postfix count operations return their input converted to
7565    // number. The case when the input is already a number is covered
7566    // above in the allocation code for old_value.
7567    if (is_postfix && !new_value.type_info().IsNumber()) {
7568      old_value.set_type_info(TypeInfo::Number());
7569    }
7570
7571    new_value.set_type_info(TypeInfo::Number());
7572
7573    // Postfix: store the old value in the allocated slot under the
7574    // reference.
7575    if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
7576
7577    frame_->Push(&new_value);
7578    // Non-constant: update the reference.
7579    if (!is_const) target.SetValue(NOT_CONST_INIT);
7580  }
7581
7582  // Postfix: drop the new value and use the old.
7583  if (is_postfix) frame_->Drop();
7584}
7585
7586
7587void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
7588  // According to ECMA-262 section 11.11, page 58, the binary logical
7589  // operators must yield the result of one of the two expressions
7590  // before any ToBoolean() conversions. This means that the value
7591  // produced by a && or || operator is not necessarily a boolean.
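  // For example, (0 || 'x') yields the string 'x', and ('x' && 0)
  // yields the number 0.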
7592
7593  // NOTE: If the left hand side produces a materialized value (not
7594  // control flow), we force the right hand side to do the same. This
7595  // is necessary because we assume that if we get control flow on the
7596  // last path out of an expression we got it on all paths.
7597  if (node->op() == Token::AND) {
7598    JumpTarget is_true;
7599    ControlDestination dest(&is_true, destination()->false_target(), true);
7600    LoadCondition(node->left(), &dest, false);
7601
7602    if (dest.false_was_fall_through()) {
7603      // The current false target was used as the fall-through.  If
7604      // there are no dangling jumps to is_true then the left
7605      // subexpression was unconditionally false.  Otherwise we have
7606      // paths where we do have to evaluate the right subexpression.
7607      if (is_true.is_linked()) {
7608        // We need to compile the right subexpression.  If the jump to
7609        // the current false target was a forward jump then we have a
7610        // valid frame, we have just bound the false target, and we
7611        // have to jump around the code for the right subexpression.
7612        if (has_valid_frame()) {
7613          destination()->false_target()->Unuse();
7614          destination()->false_target()->Jump();
7615        }
7616        is_true.Bind();
7617        // The left subexpression compiled to control flow, so the
7618        // right one is free to do so as well.
7619        LoadCondition(node->right(), destination(), false);
7620      } else {
7621        // We have actually just jumped to or bound the current false
7622        // target but the current control destination is not marked as
7623        // used.
7624        destination()->Use(false);
7625      }
7626
7627    } else if (dest.is_used()) {
7628      // The left subexpression compiled to control flow (and is_true
7629      // was just bound), so the right is free to do so as well.
7630      LoadCondition(node->right(), destination(), false);
7631
7632    } else {
7633      // We have a materialized value on the frame, so we exit with
7634      // one on all paths.  There are possibly also jumps to is_true
7635      // from nested subexpressions.
7636      JumpTarget pop_and_continue;
7637      JumpTarget exit;
7638
7639      // Avoid popping the result if it converts to 'false' using the
7640      // standard ToBoolean() conversion as described in ECMA-262,
7641      // section 9.2, page 30.
7642      //
7643      // Duplicate the TOS value. The duplicate will be popped by
7644      // ToBoolean.
7645      frame_->Dup();
7646      ControlDestination dest(&pop_and_continue, &exit, true);
7647      ToBoolean(&dest);
7648
7649      // Pop the result of evaluating the first part.
7650      frame_->Drop();
7651
7652      // Compile right side expression.
7653      is_true.Bind();
7654      Load(node->right());
7655
7656      // Exit (always with a materialized value).
7657      exit.Bind();
7658    }
7659
7660  } else {
7661    ASSERT(node->op() == Token::OR);
7662    JumpTarget is_false;
7663    ControlDestination dest(destination()->true_target(), &is_false, false);
7664    LoadCondition(node->left(), &dest, false);
7665
7666    if (dest.true_was_fall_through()) {
7667      // The current true target was used as the fall-through.  If
7668      // there are no dangling jumps to is_false then the left
7669      // subexpression was unconditionally true.  Otherwise we have
7670      // paths where we do have to evaluate the right subexpression.
7671      if (is_false.is_linked()) {
7672        // We need to compile the right subexpression.  If the jump to
7673        // the current true target was a forward jump then we have a
7674        // valid frame, we have just bound the true target, and we
7675        // have to jump around the code for the right subexpression.
7676        if (has_valid_frame()) {
7677          destination()->true_target()->Unuse();
7678          destination()->true_target()->Jump();
7679        }
7680        is_false.Bind();
7681        // The left subexpression compiled to control flow, so the
7682        // right one is free to do so as well.
7683        LoadCondition(node->right(), destination(), false);
7684      } else {
7685        // We have just jumped to or bound the current true target but
7686        // the current control destination is not marked as used.
7687        destination()->Use(true);
7688      }
7689
7690    } else if (dest.is_used()) {
7691      // The left subexpression compiled to control flow (and is_false
7692      // was just bound), so the right is free to do so as well.
7693      LoadCondition(node->right(), destination(), false);
7694
7695    } else {
7696      // We have a materialized value on the frame, so we exit with
7697      // one on all paths.  There are possibly also jumps to is_false
7698      // from nested subexpressions.
7699      JumpTarget pop_and_continue;
7700      JumpTarget exit;
7701
7702      // Avoid popping the result if it converts to 'true' using the
7703      // standard ToBoolean() conversion as described in ECMA-262,
7704      // section 9.2, page 30.
7705      //
7706      // Duplicate the TOS value. The duplicate will be popped by
7707      // ToBoolean.
7708      frame_->Dup();
7709      ControlDestination dest(&exit, &pop_and_continue, false);
7710      ToBoolean(&dest);
7711
7712      // Pop the result of evaluating the first part.
7713      frame_->Drop();
7714
7715      // Compile right side expression.
7716      is_false.Bind();
7717      Load(node->right());
7718
7719      // Exit (always with a materialized value).
7720      exit.Bind();
7721    }
7722  }
7723}
7724

7725void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
7726  Comment cmnt(masm_, "[ BinaryOperation");
7727
7728  if (node->op() == Token::AND || node->op() == Token::OR) {
7729    GenerateLogicalBooleanOperation(node);
7730  } else {
7731    // NOTE: The code below assumes that the slow cases (calls to runtime)
7732    // never return a constant/immutable object.
7733    OverwriteMode overwrite_mode = NO_OVERWRITE;
7734    if (node->left()->ResultOverwriteAllowed()) {
7735      overwrite_mode = OVERWRITE_LEFT;
7736    } else if (node->right()->ResultOverwriteAllowed()) {
7737      overwrite_mode = OVERWRITE_RIGHT;
7738    }
7739
7740    if (node->left()->IsTrivial()) {
7741      Load(node->right());
7742      Result right = frame_->Pop();
7743      frame_->Push(node->left());
7744      frame_->Push(&right);
7745    } else {
7746      Load(node->left());
7747      Load(node->right());
7748    }
7749    GenericBinaryOperation(node, overwrite_mode);
7750  }
7751}
7752
7753
7754void CodeGenerator::VisitThisFunction(ThisFunction* node) {
7755  frame_->PushFunction();
7756}
7757
7758
7759void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
7760  Comment cmnt(masm_, "[ CompareOperation");
7761
7762  // Get the expressions from the node.
7763  Expression* left = node->left();
7764  Expression* right = node->right();
7765  Token::Value op = node->op();
7766  // To make typeof testing for natives implemented in JavaScript really
7767  // efficient, we generate special code for expressions of the form:
7768  // 'typeof <expression> == <string>'.
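  // For example, 'typeof x == "number"' compiles to a few type checks
  // on the operand; the typeof result string is never materialized.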
7769  UnaryOperation* operation = left->AsUnaryOperation();
7770  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
7771      (operation != NULL && operation->op() == Token::TYPEOF) &&
7772      (right->AsLiteral() != NULL &&
7773       right->AsLiteral()->handle()->IsString())) {
7774    Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
7775
7776    // Load the operand and move it to a register.
7777    LoadTypeofExpression(operation->expression());
7778    Result answer = frame_->Pop();
7779    answer.ToRegister();
7780
7781    if (check->Equals(HEAP->number_symbol())) {
7782      Condition is_smi = masm_->CheckSmi(answer.reg());
7783      destination()->true_target()->Branch(is_smi);
7784      frame_->Spill(answer.reg());
7785      __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
7786      __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
7787      answer.Unuse();
7788      destination()->Split(equal);
7789
7790    } else if (check->Equals(HEAP->string_symbol())) {
7791      Condition is_smi = masm_->CheckSmi(answer.reg());
7792      destination()->false_target()->Branch(is_smi);
7793
7794      // It can be an undetectable string object.
7795      __ movq(kScratchRegister,
7796              FieldOperand(answer.reg(), HeapObject::kMapOffset));
7797      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
7798               Immediate(1 << Map::kIsUndetectable));
7799      destination()->false_target()->Branch(not_zero);
7800      __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
7801      answer.Unuse();
7802      destination()->Split(below);  // Unsigned byte comparison needed.
7803
7804    } else if (check->Equals(HEAP->boolean_symbol())) {
7805      __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
7806      destination()->true_target()->Branch(equal);
7807      __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
7808      answer.Unuse();
7809      destination()->Split(equal);
7810
7811    } else if (check->Equals(HEAP->undefined_symbol())) {
7812      __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
7813      destination()->true_target()->Branch(equal);
7814
7815      Condition is_smi = masm_->CheckSmi(answer.reg());
7816      destination()->false_target()->Branch(is_smi);
7817
7818      // It can be an undetectable object.
7819      __ movq(kScratchRegister,
7820              FieldOperand(answer.reg(), HeapObject::kMapOffset));
7821      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
7822               Immediate(1 << Map::kIsUndetectable));
7823      answer.Unuse();
7824      destination()->Split(not_zero);
7825
7826    } else if (check->Equals(HEAP->function_symbol())) {
7827      Condition is_smi = masm_->CheckSmi(answer.reg());
7828      destination()->false_target()->Branch(is_smi);
7829      frame_->Spill(answer.reg());
7830      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
7831      destination()->true_target()->Branch(equal);
7832      // Regular expressions are callable so typeof == 'function'.
7833      __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
7834      answer.Unuse();
7835      destination()->Split(equal);
7836
7837    } else if (check->Equals(HEAP->object_symbol())) {
7838      Condition is_smi = masm_->CheckSmi(answer.reg());
7839      destination()->false_target()->Branch(is_smi);
7840      __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
7841      destination()->true_target()->Branch(equal);
7842
7843      // Regular expressions are typeof == 'function', not 'object'.
7844      __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
7845      destination()->false_target()->Branch(equal);
7846
7847      // It can be an undetectable object.
7848      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
7849               Immediate(1 << Map::kIsUndetectable));
7850      destination()->false_target()->Branch(not_zero);
7851      __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
7852      destination()->false_target()->Branch(below);
7853      __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
7854      answer.Unuse();
7855      destination()->Split(below_equal);
7856    } else {
7857      // Uncommon case: typeof testing against a string literal that is
7858      // never returned from the typeof operator.
7859      answer.Unuse();
7860      destination()->Goto(false);
7861    }
7862    return;
7863  }
7864
7865  Condition cc = no_condition;
7866  bool strict = false;
7867  switch (op) {
7868    case Token::EQ_STRICT:
7869      strict = true;
7870      // Fall through
7871    case Token::EQ:
7872      cc = equal;
7873      break;
7874    case Token::LT:
7875      cc = less;
7876      break;
7877    case Token::GT:
7878      cc = greater;
7879      break;
7880    case Token::LTE:
7881      cc = less_equal;
7882      break;
7883    case Token::GTE:
7884      cc = greater_equal;
7885      break;
7886    case Token::IN: {
7887      Load(left);
7888      Load(right);
7889      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
7890      frame_->Push(&answer);  // Push the result.
7891      return;
7892    }
7893    case Token::INSTANCEOF: {
7894      Load(left);
7895      Load(right);
7896      InstanceofStub stub(InstanceofStub::kNoFlags);
7897      Result answer = frame_->CallStub(&stub, 2);
7898      answer.ToRegister();
7899      __ testq(answer.reg(), answer.reg());
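      // (The stub returns zero when the value is an instance, so the
      // 'zero' condition below selects the true target.)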
7900      answer.Unuse();
7901      destination()->Split(zero);
7902      return;
7903    }
7904    default:
7905      UNREACHABLE();
7906  }
7907
7908  if (left->IsTrivial()) {
7909    Load(right);
7910    Result right_result = frame_->Pop();
7911    frame_->Push(left);
7912    frame_->Push(&right_result);
7913  } else {
7914    Load(left);
7915    Load(right);
7916  }
7917
7918  Comparison(node, cc, strict, destination());
7919}
7920
7921
7922void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
7923  Comment cmnt(masm_, "[ CompareToNull");
7924
7925  Load(node->expression());
7926  Result operand = frame_->Pop();
7927  operand.ToRegister();
7928  __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
7929  if (node->is_strict()) {
7930    operand.Unuse();
7931    destination()->Split(equal);
7932  } else {
7933    // The 'null' value is only equal to 'undefined' if using non-strict
7934    // comparisons.
7935    destination()->true_target()->Branch(equal);
7936    __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
7937    destination()->true_target()->Branch(equal);
7938    Condition is_smi = masm_->CheckSmi(operand.reg());
7939    destination()->false_target()->Branch(is_smi);
7940
7941    // It can be an undetectable object.
7942    // Use a scratch register in preference to spilling operand.reg().
7943    Result temp = allocator()->Allocate();
7944    ASSERT(temp.is_valid());
7945    __ movq(temp.reg(),
7946            FieldOperand(operand.reg(), HeapObject::kMapOffset));
7947    __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
7948             Immediate(1 << Map::kIsUndetectable));
7949    temp.Unuse();
7950    operand.Unuse();
7951    destination()->Split(not_zero);
7952  }
7953}
7954
7955
7956#ifdef DEBUG
7957bool CodeGenerator::HasValidEntryRegisters() {
7958  return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
7959      && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
7960      && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
7961      && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
7962      && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
7963      && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
7964      && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
7965      && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
7966      && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
7967      && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0));
7968}
7969#endif
7970
7971
7972
7973// Emit a LoadIC call to get the value from receiver and leave it in
7974// dst.  The receiver register is restored after the call.
7975class DeferredReferenceGetNamedValue: public DeferredCode {
7976 public:
7977  DeferredReferenceGetNamedValue(Register dst,
7978                                 Register receiver,
7979                                 Handle<String> name)
7980      : dst_(dst), receiver_(receiver), name_(name) {
7981    set_comment("[ DeferredReferenceGetNamedValue");
7982  }
7983
7984  virtual void Generate();
7985
7986  Label* patch_site() { return &patch_site_; }
7987
7988 private:
7989  Label patch_site_;
7990  Register dst_;
7991  Register receiver_;
7992  Handle<String> name_;
7993};
7994
7995
7996void DeferredReferenceGetNamedValue::Generate() {
7997  if (!receiver_.is(rax)) {
7998    __ movq(rax, receiver_);
7999  }
8000  __ Move(rcx, name_);
8001  Handle<Code> ic = Isolate::Current()->builtins()->LoadIC_Initialize();
8002  __ Call(ic, RelocInfo::CODE_TARGET);
8003  // The call must be followed by a test rax instruction to indicate
8004  // that the inobject property case was inlined.
8005  //
8006  // Store the delta to the map check instruction here in the test
8007  // instruction.  Use masm_-> instead of the __ macro since the
8008  // latter can't return a value.
8009  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
8010  // Here we use masm_-> instead of the __ macro because this is the
8011  // instruction that gets patched and coverage code gets in the way.
8012  masm_->testl(rax, Immediate(-delta_to_patch_site));
8013  Counters* counters = masm()->isolate()->counters();
8014  __ IncrementCounter(counters->named_load_inline_miss(), 1);
8015
8016  if (!dst_.is(rax)) __ movq(dst_, rax);
8017}
8018
8019
8020class DeferredReferenceGetKeyedValue: public DeferredCode {
8021 public:
8022  DeferredReferenceGetKeyedValue(Register dst,
8023                                 Register receiver,
8024                                 Register key)
8025      : dst_(dst), receiver_(receiver), key_(key) {
8026    set_comment("[ DeferredReferenceGetKeyedValue");
8027  }
8028
8029  virtual void Generate();
8030
8031  Label* patch_site() { return &patch_site_; }
8032
8033 private:
8034  Label patch_site_;
8035  Register dst_;
8036  Register receiver_;
8037  Register key_;
8038};
8039
8040
8041void DeferredReferenceGetKeyedValue::Generate() {
8042  if (receiver_.is(rdx)) {
8043    if (!key_.is(rax)) {
8044      __ movq(rax, key_);
8045    }  // Else do nothing.
8046  } else if (receiver_.is(rax)) {
8047    if (key_.is(rdx)) {
8048      __ xchg(rax, rdx);
8049    } else if (key_.is(rax)) {
8050      __ movq(rdx, receiver_);
8051    } else {
8052      __ movq(rdx, receiver_);
8053      __ movq(rax, key_);
8054    }
8055  } else if (key_.is(rax)) {
8056    __ movq(rdx, receiver_);
8057  } else {
8058    __ movq(rax, key_);
8059    __ movq(rdx, receiver_);
8060  }
8061  // Calculate the delta from the IC call instruction to the map check
8062  // movq instruction in the inlined version.  This delta is stored in
8063  // a test(rax, delta) instruction after the call so that we can find
8064  // it in the IC initialization code and patch the movq instruction.
8065  // This means that we cannot allow test instructions after calls to
8066  // KeyedLoadIC stubs in other places.
8067  Handle<Code> ic = Isolate::Current()->builtins()->KeyedLoadIC_Initialize();
8068  __ Call(ic, RelocInfo::CODE_TARGET);
8069  // The delta from the start of the map-compare instruction to the
8070  // test instruction.  We use masm_-> directly here instead of the __
8071  // macro because the macro sometimes uses macro expansion to turn
8072  // into something that can't return a value.  This is encountered
8073  // when doing generated code coverage tests.
8074  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
8075  // Here we use masm_-> instead of the __ macro because this is the
8076  // instruction that gets patched and coverage code gets in the way.
8077  // TODO(X64): Consider whether it's worth switching the test to a
8078  // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
8079  // be generated normally.
8080  masm_->testl(rax, Immediate(-delta_to_patch_site));
8081  Counters* counters = masm()->isolate()->counters();
8082  __ IncrementCounter(counters->keyed_load_inline_miss(), 1);
8083
8084  if (!dst_.is(rax)) __ movq(dst_, rax);
8085}
8086
8087
8088class DeferredReferenceSetKeyedValue: public DeferredCode {
8089 public:
8090  DeferredReferenceSetKeyedValue(Register value,
8091                                 Register key,
8092                                 Register receiver,
8093                                 StrictModeFlag strict_mode)
8094      : value_(value),
8095        key_(key),
8096        receiver_(receiver),
8097        strict_mode_(strict_mode) {
8098    set_comment("[ DeferredReferenceSetKeyedValue");
8099  }
8100
8101  virtual void Generate();
8102
8103  Label* patch_site() { return &patch_site_; }
8104
8105 private:
8106  Register value_;
8107  Register key_;
8108  Register receiver_;
8109  Label patch_site_;
8110  StrictModeFlag strict_mode_;
8111};
8112
8113
8114void DeferredReferenceSetKeyedValue::Generate() {
8115  Counters* counters = masm()->isolate()->counters();
8116  __ IncrementCounter(counters->keyed_store_inline_miss(), 1);
8117  // Move value, receiver, and key to registers rax, rdx, and rcx, as
8118  // the IC stub expects.
8119  // Move value to rax, using xchg if the receiver or key is in rax.
8120  if (!value_.is(rax)) {
8121    if (!receiver_.is(rax) && !key_.is(rax)) {
8122      __ movq(rax, value_);
8123    } else {
8124      __ xchg(rax, value_);
8125      // Update receiver_ and key_ if they are affected by the swap.
8126      if (receiver_.is(rax)) {
8127        receiver_ = value_;
8128      } else if (receiver_.is(value_)) {
8129        receiver_ = rax;
8130      }
8131      if (key_.is(rax)) {
8132        key_ = value_;
8133      } else if (key_.is(value_)) {
8134        key_ = rax;
8135      }
8136    }
8137  }
8138  // Value is now in rax. Its original location is remembered in value_,
8139  // and the value is restored to value_ before returning.
8140  // The variables receiver_ and key_ are not preserved.
8141  // Move receiver and key to rdx and rcx, swapping if necessary.
8142  if (receiver_.is(rdx)) {
8143    if (!key_.is(rcx)) {
8144      __ movq(rcx, key_);
8145    }  // Else everything is already in the right place.
8146  } else if (receiver_.is(rcx)) {
8147    if (key_.is(rdx)) {
8148      __ xchg(rcx, rdx);
8149    } else if (key_.is(rcx)) {
8150      __ movq(rdx, receiver_);
8151    } else {
8152      __ movq(rdx, receiver_);
8153      __ movq(rcx, key_);
8154    }
8155  } else if (key_.is(rcx)) {
8156    __ movq(rdx, receiver_);
8157  } else {
8158    __ movq(rcx, key_);
8159    __ movq(rdx, receiver_);
8160  }
8161
8162  // Call the IC stub.
8163  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
8164      (strict_mode_ == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
8165                                    : Builtins::kKeyedStoreIC_Initialize));
8166  __ Call(ic, RelocInfo::CODE_TARGET);
8167  // The delta from the start of the map-compare instructions (initial movq)
8168  // to the test instruction.  We use masm_-> directly here instead of the
8169  // __ macro because the macro sometimes uses macro expansion to turn
8170  // into something that can't return a value.  This is encountered
8171  // when doing generated code coverage tests.
8172  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
8173  // Here we use masm_-> instead of the __ macro because this is the
8174  // instruction that gets patched and coverage code gets in the way.
8175  masm_->testl(rax, Immediate(-delta_to_patch_site));
8176  // Restore value (returned from store IC).
8177  if (!value_.is(rax)) __ movq(value_, rax);
8178}
8179
8180
8181Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
8182#ifdef DEBUG
8183  int original_height = frame()->height();
8184#endif
8185  Result result;
8186  // Do not inline the inobject property case for loads from the global
8187  // object.  Also do not inline for unoptimized code.  This saves time
8188  // in the code generator.  Unoptimized code is toplevel code or code
8189  // that is not in a loop.
8190  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
8191    Comment cmnt(masm(), "[ Load from named Property");
8192    frame()->Push(name);
8193
8194    RelocInfo::Mode mode = is_contextual
8195        ? RelocInfo::CODE_TARGET_CONTEXT
8196        : RelocInfo::CODE_TARGET;
8197    result = frame()->CallLoadIC(mode);
8198    // A test rax instruction following the call signals that the
8199    // inobject property case was inlined.  Ensure that there is not
8200    // a test rax instruction here.
8201    __ nop();
8202  } else {
8203    // Inline the inobject property case.
8204    Comment cmnt(masm(), "[ Inlined named property load");
8205    Result receiver = frame()->Pop();
8206    receiver.ToRegister();
8207    result = allocator()->Allocate();
8208    ASSERT(result.is_valid());
8209
8210    // r12 is now a reserved register, so it cannot be the receiver.
8211    // If it was, the distance to the fixup location would not be constant.
8212    ASSERT(!receiver.reg().is(r12));
8213
8214    DeferredReferenceGetNamedValue* deferred =
8215        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
8216
8217    // Check that the receiver is a heap object.
8218    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
8219
8220    __ bind(deferred->patch_site());
8221    // This is the map check instruction that will be patched (so we can't
8222    // use the double underscore macro that may insert instructions).
8223    // Initially use an invalid map to force a failure.
8224    masm()->movq(kScratchRegister, FACTORY->null_value(),
8225                 RelocInfo::EMBEDDED_OBJECT);
8226    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
8227                 kScratchRegister);
8228    // This branch is always a forwards branch so it's always a fixed
8229    // size which allows the assert below to succeed and patching to work.
8230    // Don't use deferred->Branch(...), since that might add coverage code.
8231    masm()->j(not_equal, deferred->entry_label());
8232
8233    // The delta from the patch label to the load offset must be
8234    // statically known.
8235    ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
8236           LoadIC::kOffsetToLoadInstruction);
8237    // The initial (invalid) offset has to be large enough to force
8238    // a 32-bit instruction encoding to allow patching with an
8239    // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
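    // (A small offset would be encoded as an 8-bit displacement,
    // leaving no room to patch in an arbitrary 32-bit field offset.)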
8240    int offset = kMaxInt;
8241    masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
8242
8243    Counters* counters = masm()->isolate()->counters();
8244    __ IncrementCounter(counters->named_load_inline(), 1);
8245    deferred->BindExit();
8246  }
8247  ASSERT(frame()->height() == original_height - 1);
8248  return result;
8249}
8250
8251
8252Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
8253#ifdef DEBUG
8254  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
8255#endif
8256
8257  Result result;
8258  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
8259      result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
8260      // A test rax instruction following the call signals that the inobject
8261      // property case was inlined.  Ensure that there is not a test rax
8262      // instruction here.
8263      __ nop();
8264  } else {
8265    // Inline the in-object property case.
8266    JumpTarget slow, done;
8267    Label patch_site;
8268
8269    // Get the value and receiver from the stack.
8270    Result value = frame()->Pop();
8271    value.ToRegister();
8272    Result receiver = frame()->Pop();
8273    receiver.ToRegister();
8274
8275    // Allocate result register.
8276    result = allocator()->Allocate();
8277    ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
8278
8279    // Cannot use r12 for the receiver: like rsp, r12 in the r/m field
8280    // of a ModR/M byte forces an extra SIB byte, which would change
8281    // the distance between the call and the fixup location.
8282    if (receiver.reg().is(r12)) {
8283      frame()->Spill(receiver.reg());  // It will be overwritten with result.
8284      // Swap receiver and value.
8285      __ movq(result.reg(), receiver.reg());
8286      Result temp = receiver;
8287      receiver = result;
8288      result = temp;
8289    }
8290
8291    // Check that the receiver is a heap object.
8292    Condition is_smi = masm()->CheckSmi(receiver.reg());
8293    slow.Branch(is_smi, &value, &receiver);
8294
8295    // This is the map check instruction that will be patched.
8296    // Initially use an invalid map to force a failure. The exact
8297    // instruction sequence is important because we use the
8298    // kOffsetToStoreInstruction constant for patching. We avoid using
8299    // the __ macro for the following two instructions because it
8300    // might introduce extra instructions.
8301    __ bind(&patch_site);
8302    masm()->movq(kScratchRegister, FACTORY->null_value(),
8303                 RelocInfo::EMBEDDED_OBJECT);
8304    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
8305                 kScratchRegister);
8306    // This branch is always a forwards branch so it's always a fixed size
8307    // which allows the assert below to succeed and patching to work.
8308    slow.Branch(not_equal, &value, &receiver);
8309
8310    // The delta from the patch label to the store offset must be
8311    // statically known.
8312    ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
8313           StoreIC::kOffsetToStoreInstruction);
8314
8315    // The initial (invalid) offset has to be large enough to force a 32-bit
8316    // instruction encoding to allow patching with an arbitrary offset.  Use
8317    // kMaxInt (minus kHeapObjectTag).
8318    int offset = kMaxInt;
8319    __ movq(FieldOperand(receiver.reg(), offset), value.reg());
8320    __ movq(result.reg(), value.reg());
8321
8322    // Allocate scratch register for write barrier.
8323    Result scratch = allocator()->Allocate();
8324    ASSERT(scratch.is_valid());
8325
8326    // The write barrier clobbers all input registers, so spill the
8327    // receiver and the value.
8328    frame_->Spill(receiver.reg());
8329    frame_->Spill(value.reg());
8330
8331    // If the receiver and the value share a register allocate a new
8332    // register for the receiver.
8333    if (receiver.reg().is(value.reg())) {
8334      receiver = allocator()->Allocate();
8335      ASSERT(receiver.is_valid());
8336      __ movq(receiver.reg(), value.reg());
8337    }
8338
8339    // Update the write barrier. To save instructions in the inlined
8340    // version we do not filter smis.
8341    Label skip_write_barrier;
8342    __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
8343    int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
8344    __ lea(scratch.reg(), Operand(receiver.reg(), offset));
8345    __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
8346    if (FLAG_debug_code) {
8347      __ movq(receiver.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
8348      __ movq(value.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
8349      __ movq(scratch.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
8350    }
8351    __ bind(&skip_write_barrier);
8352    value.Unuse();
8353    scratch.Unuse();
8354    receiver.Unuse();
8355    done.Jump(&result);
8356
8357    slow.Bind(&value, &receiver);
8358    frame()->Push(&receiver);
8359    frame()->Push(&value);
8360    result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
8361    // Encode the offset to the map check instruction and the offset
8362    // to the write barrier store address computation in a test rax
8363    // instruction.
8364    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
8365    __ testl(rax,
8366             Immediate((delta_to_record_write << 16) | delta_to_patch_site));
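    // (Illustrative values: if delta_to_patch_site were 0x24 and
    // delta_to_record_write were 0x58, the immediate would be
    // 0x00580024, one 16-bit half per delta.)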
8367    done.Bind(&result);
8368  }
8369
8370  ASSERT_EQ(expected_height, frame()->height());
8371  return result;
8372}
8373
8374
8375Result CodeGenerator::EmitKeyedLoad() {
8376#ifdef DEBUG
8377  int original_height = frame()->height();
8378#endif
8379  Result result;
8380  // Inline array load code if inside of a loop.  We do not know
8381  // the receiver map yet, so we initially generate the code with
8382  // a check against an invalid map.  In the inline cache code, we
8383  // patch the map check if appropriate.
8384  if (loop_nesting() > 0) {
8385    Comment cmnt(masm_, "[ Inlined load from keyed Property");
8386
8387    // Use a fresh temporary to load the elements without destroying
8388    // the receiver which is needed for the deferred slow case.
8389    // Allocate the temporary early so that we use rax if it is free.
8390    Result elements = allocator()->Allocate();
8391    ASSERT(elements.is_valid());
8392
8393    Result key = frame_->Pop();
8394    Result receiver = frame_->Pop();
8395    key.ToRegister();
8396    receiver.ToRegister();
8397
8398    // If key and receiver are shared registers on the frame, their values will
8399    // be automatically saved and restored when going to deferred code.
8400    // The result is returned in elements, which is not shared.
8401    DeferredReferenceGetKeyedValue* deferred =
8402        new DeferredReferenceGetKeyedValue(elements.reg(),
8403                                           receiver.reg(),
8404                                           key.reg());
8405
8406    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
8407
8408    // Check that the receiver has the expected map.
8409    // Initially, use an invalid map. The map is patched in the IC
8410    // initialization code.
8411    __ bind(deferred->patch_site());
8412    // Use masm-> here instead of the double underscore macro since extra
8413    // coverage code can interfere with the patching.  Do not use a load
8414    // from the root array to load null_value, since the load must be patched
8415    // with the expected receiver map, which is not in the root array.
8416    masm_->movq(kScratchRegister, FACTORY->null_value(),
8417                RelocInfo::EMBEDDED_OBJECT);
8418    masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
8419                kScratchRegister);
8420    deferred->Branch(not_equal);
8421
8422    __ JumpUnlessNonNegativeSmi(key.reg(), deferred->entry_label());
8423
8424    // Get the elements array from the receiver.
8425    __ movq(elements.reg(),
8426            FieldOperand(receiver.reg(), JSObject::kElementsOffset));
8427    __ AssertFastElements(elements.reg());
8428
8429    // Check that key is within bounds.
8430    __ SmiCompare(key.reg(),
8431                  FieldOperand(elements.reg(), FixedArray::kLengthOffset));
8432    deferred->Branch(above_equal);
8433
8434    // Load and check that the result is not the hole.  We could
8435    // reuse the index or elements register for the value.
8436    //
8437    // TODO(206): Consider whether it makes sense to try some
8438    // heuristic about which register to reuse.  For example, if
8439    // one is rax, then we can reuse that one because the value
8440    // coming from the deferred code will be in rax.
8441    SmiIndex index =
8442        masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
8443    __ movq(elements.reg(),
8444            FieldOperand(elements.reg(),
8445                         index.reg,
8446                         index.scale,
8447                         FixedArray::kHeaderSize));
8448    result = elements;
8449    __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
8450    deferred->Branch(equal);
8451    Counters* counters = masm()->isolate()->counters();
8452    __ IncrementCounter(counters->keyed_load_inline(), 1);
8453
8454    deferred->BindExit();
8455  } else {
8456    Comment cmnt(masm_, "[ Load from keyed Property");
8457    result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
8458    // Make sure that we do not have a test instruction after the
8459    // call.  A test instruction after the call is used to
8460    // indicate that we have generated an inline version of the
8461    // keyed load.  The explicit nop instruction is here because
8462    // the push that follows might be peep-hole optimized away.
8463    __ nop();
8464  }
8465  ASSERT(frame()->height() == original_height - 2);
8466  return result;
8467}
8468
8469
8470Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
8471#ifdef DEBUG
8472  int original_height = frame()->height();
8473#endif
8474  Result result;
8475  // Generate inlined version of the keyed store if the code is in a loop
8476  // and the key is likely to be a smi.
8477  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
8478    Comment cmnt(masm(), "[ Inlined store to keyed Property");
8479
8480    // Get the receiver, key and value into registers.
8481    result = frame()->Pop();
8482    Result key = frame()->Pop();
8483    Result receiver = frame()->Pop();
8484
8485    Result tmp = allocator_->Allocate();
8486    ASSERT(tmp.is_valid());
8487    Result tmp2 = allocator_->Allocate();
8488    ASSERT(tmp2.is_valid());
8489
8490    // Determine whether the value is a constant before putting it in a
8491    // register.
8492    bool value_is_constant = result.is_constant();
8493
8494    // Make sure that value, key and receiver are in registers.
8495    result.ToRegister();
8496    key.ToRegister();
8497    receiver.ToRegister();
8498
8499    DeferredReferenceSetKeyedValue* deferred =
8500        new DeferredReferenceSetKeyedValue(result.reg(),
8501                                           key.reg(),
8502                                           receiver.reg(),
8503                                           strict_mode_flag());
8504
8505    // Check that the receiver is not a smi.
8506    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
8507
8508    // Check that the key is a smi.
8509    if (!key.is_smi()) {
8510      __ JumpIfNotSmi(key.reg(), deferred->entry_label());
8511    } else if (FLAG_debug_code) {
8512      __ AbortIfNotSmi(key.reg());
8513    }
8514
8515    // Check that the receiver is a JSArray.
8516    __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
8517    deferred->Branch(not_equal);
8518
8519    // Get the elements array from the receiver and check that it is not a
8520    // dictionary.
8521    __ movq(tmp.reg(),
8522            FieldOperand(receiver.reg(), JSArray::kElementsOffset));
8523
8524    // Check whether it is possible to omit the write barrier. If the elements
8525    // array is in new space or the value written is a smi we can safely update
8526    // the elements array without write barrier.
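    // (A smi store writes no heap pointer, and pointers stored into
    // new-space objects are found by the scavenger without remembered
    // set entries, so neither case needs the barrier.)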
8527    Label in_new_space;
8528    __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
8529    if (!value_is_constant) {
8530      __ JumpIfNotSmi(result.reg(), deferred->entry_label());
8531    }
8532
8533    __ bind(&in_new_space);
8534    // Bind the deferred code patch site to be able to locate the fixed
8535    // array map comparison.  When debugging, we patch this comparison to
8536    // always fail so that we will hit the IC call in the deferred code
8537    // which will allow the debugger to break for fast case stores.
8538    __ bind(deferred->patch_site());
8539    // Avoid using __ to ensure the distance from patch_site
8540    // to the map address is always the same.
8541    masm()->movq(kScratchRegister, FACTORY->fixed_array_map(),
8542                 RelocInfo::EMBEDDED_OBJECT);
8543    __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
8544            kScratchRegister);
8545    deferred->Branch(not_equal);
8546
8547    // Check that the key is within bounds.  Both the key and the length of
8548    // the JSArray are smis (because the fixed array check above ensures the
8549    // elements are in fast case). Use unsigned comparison to handle negative
8550    // keys.
8551    __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
8552                  key.reg());
8553    deferred->Branch(below_equal);
8554
8555    // Store the value.
8556    SmiIndex index =
8557        masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
8558    __ movq(FieldOperand(tmp.reg(),
8559                         index.reg,
8560                         index.scale,
8561                         FixedArray::kHeaderSize),
8562            result.reg());
8563    Counters* counters = masm()->isolate()->counters();
8564    __ IncrementCounter(counters->keyed_store_inline(), 1);
8565
8566    deferred->BindExit();
8567  } else {
8568    result = frame()->CallKeyedStoreIC(strict_mode_flag());
8569    // Make sure that we do not have a test instruction after the
8570    // call.  A test instruction after the call is used to
8571    // indicate that we have generated an inline version of the
8572    // keyed store.
8573    __ nop();
8574  }
8575  ASSERT(frame()->height() == original_height - 3);
8576  return result;
8577}
8578
8579
8580#undef __
8581#define __ ACCESS_MASM(masm)
8582
8583
8584Handle<String> Reference::GetName() {
8585  ASSERT(type_ == NAMED);
8586  Property* property = expression_->AsProperty();
8587  if (property == NULL) {
8588    // Global variable reference treated as a named property reference.
8589    VariableProxy* proxy = expression_->AsVariableProxy();
8590    ASSERT(proxy->AsVariable() != NULL);
8591    ASSERT(proxy->AsVariable()->is_global());
8592    return proxy->name();
8593  } else {
8594    Literal* raw_name = property->key()->AsLiteral();
8595    ASSERT(raw_name != NULL);
8596    return Handle<String>(String::cast(*raw_name->handle()));
8597  }
8598}
8599
8600
8601void Reference::GetValue() {
8602  ASSERT(!cgen_->in_spilled_code());
8603  ASSERT(cgen_->HasValidEntryRegisters());
8604  ASSERT(!is_illegal());
8605  MacroAssembler* masm = cgen_->masm();
8606
8607  // Record the source position for the property load.
8608  Property* property = expression_->AsProperty();
8609  if (property != NULL) {
8610    cgen_->CodeForSourcePosition(property->position());
8611  }
8612
8613  switch (type_) {
8614    case SLOT: {
8615      Comment cmnt(masm, "[ Load from Slot");
8616      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
8617      ASSERT(slot != NULL);
8618      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
8619      break;
8620    }
8621
8622    case NAMED: {
8623      Variable* var = expression_->AsVariableProxy()->AsVariable();
8624      bool is_global = var != NULL;
8625      ASSERT(!is_global || var->is_global());
8626      if (persist_after_get_) {
8627        cgen_->frame()->Dup();
8628      }
8629      Result result = cgen_->EmitNamedLoad(GetName(), is_global);
8630      cgen_->frame()->Push(&result);
8631      break;
8632    }
8633
8634    case KEYED: {
8635      // A load of a bare identifier (load from global) cannot be keyed.
8636      ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
8637      if (persist_after_get_) {
8638        cgen_->frame()->PushElementAt(1);
8639        cgen_->frame()->PushElementAt(1);
8640      }
8641      Result value = cgen_->EmitKeyedLoad();
8642      cgen_->frame()->Push(&value);
8643      break;
8644    }
8645
8646    default:
8647      UNREACHABLE();
8648  }
8649
8650  if (!persist_after_get_) {
8651    set_unloaded();
8652  }
8653}
8654
8655
8656void Reference::TakeValue() {
8657  // TODO(X64): This function is completely architecture independent. Move
8658  // it somewhere shared.
8659
8660  // For non-constant frame-allocated slots, we invalidate the value in the
8661  // slot.  For all others, we fall back on GetValue.
8662  ASSERT(!cgen_->in_spilled_code());
8663  ASSERT(!is_illegal());
8664  if (type_ != SLOT) {
8665    GetValue();
8666    return;
8667  }
8668
8669  Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
8670  ASSERT(slot != NULL);
8671  if (slot->type() == Slot::LOOKUP ||
8672      slot->type() == Slot::CONTEXT ||
8673      slot->var()->mode() == Variable::CONST ||
8674      slot->is_arguments()) {
8675    GetValue();
8676    return;
8677  }
8678
8679  // Only non-constant, frame-allocated parameters and locals can reach
8680  // here.  Be careful not to use the optimizations for arguments
8681  // object access since it may not have been initialized yet.
8682  ASSERT(!slot->is_arguments());
8683  if (slot->type() == Slot::PARAMETER) {
8684    cgen_->frame()->TakeParameterAt(slot->index());
8685  } else {
8686    ASSERT(slot->type() == Slot::LOCAL);
8687    cgen_->frame()->TakeLocalAt(slot->index());
8688  }
8689
8690  ASSERT(persist_after_get_);
8691  // Do not unload the reference, because it is used in SetValue.
8692}
8693
8694
8695void Reference::SetValue(InitState init_state) {
8696  ASSERT(cgen_->HasValidEntryRegisters());
8697  ASSERT(!is_illegal());
8698  MacroAssembler* masm = cgen_->masm();
8699  switch (type_) {
8700    case SLOT: {
8701      Comment cmnt(masm, "[ Store to Slot");
8702      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
8703      ASSERT(slot != NULL);
8704      cgen_->StoreToSlot(slot, init_state);
8705      set_unloaded();
8706      break;
8707    }
8708
8709    case NAMED: {
8710      Comment cmnt(masm, "[ Store to named Property");
8711      Result answer = cgen_->EmitNamedStore(GetName(), false);
8712      cgen_->frame()->Push(&answer);
8713      set_unloaded();
8714      break;
8715    }
8716
8717    case KEYED: {
8718      Comment cmnt(masm, "[ Store to keyed Property");
8719      Property* property = expression()->AsProperty();
8720      ASSERT(property != NULL);
8721
8722      Result answer = cgen_->EmitKeyedStore(property->key()->type());
8723      cgen_->frame()->Push(&answer);
8724      set_unloaded();
8725      break;
8726    }
8727
8728    case UNLOADED:
8729    case ILLEGAL:
8730      UNREACHABLE();
8731  }
8732}
8733
8734
8735Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
8736                                                      Result* left,
8737                                                      Result* right) {
8738  if (stub->ArgsInRegistersSupported()) {
8739    stub->SetArgsInRegisters();
8740    return frame_->CallStub(stub, left, right);
8741  } else {
8742    frame_->Push(left);
8743    frame_->Push(right);
8744    return frame_->CallStub(stub, 2);
8745  }
8746}
8747
8748#undef __
8749
8750#define __ masm.
8751
8752#ifdef _WIN64
8753typedef double (*ModuloFunction)(double, double);
8754// Define a custom fmod implementation.
8755ModuloFunction CreateModuloFunction() {
8756  size_t actual_size;
8757  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
8758                                                 &actual_size,
8759                                                 true));
8760  CHECK(buffer);
8761  Assembler masm(buffer, static_cast<int>(actual_size));
8762  // Generated code is put into a fixed, unmovable, buffer, and not into
8763  // the V8 heap. We can't, and don't, refer to any relocatable addresses
8764  // (e.g. the JavaScript nan-object).
8765
8766  // Windows 64 ABI passes double arguments in xmm0, xmm1 and
8767  // returns result in xmm0.
8768  // Argument backing space is allocated on the stack above
8769  // the return address.
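  // (On Win64 this is the caller-allocated 32-byte shadow space
  // starting at rsp + kPointerSize, reused below as scratch slots.)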
8770
8771  // Compute x mod y.
8772  // Load y and x (use argument backing store as temporary storage).
8773  __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
8774  __ movsd(Operand(rsp, kPointerSize), xmm0);
8775  __ fld_d(Operand(rsp, kPointerSize * 2));
8776  __ fld_d(Operand(rsp, kPointerSize));
8777
8778  // Clear exception flags before operation.
8779  {
8780    Label no_exceptions;
8781    __ fwait();
8782    __ fnstsw_ax();
8783    // Clear if Invalid Operand or Zero Division exceptions are set.
8784    __ testb(rax, Immediate(5));
8785    __ j(zero, &no_exceptions);
8786    __ fnclex();
8787    __ bind(&no_exceptions);
8788  }
8789
8790  // Compute st(0) % st(1)
8791  {
8792    Label partial_remainder_loop;
8793    __ bind(&partial_remainder_loop);
8794    __ fprem();
8795    __ fwait();
8796    __ fnstsw_ax();
8797    __ testl(rax, Immediate(0x400 /* C2 */));
8798    // If C2 is set, computation only has partial result. Loop to
8799    // continue computation.
8800    __ j(not_zero, &partial_remainder_loop);
8801  }
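  // (fprem yields only a partial remainder when the operand exponents
  // differ by 64 or more, reducing the difference by at most 63 bits
  // per iteration, hence the loop above.)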
8802
8803  Label valid_result;
8804  Label return_result;
8805  // If Invalid Operand or Zero Division exceptions are set,
8806  // return NaN.
8807  __ testb(rax, Immediate(5));
8808  __ j(zero, &valid_result);
8809  __ fstp(0);  // Drop result in st(0).
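  // 0x7ff8000000000000 is a quiet NaN: sign 0, exponent all ones, and
  // the most significant fraction bit set.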
8810  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
8811  __ movq(rcx, kNaNValue, RelocInfo::NONE);
8812  __ movq(Operand(rsp, kPointerSize), rcx);
8813  __ movsd(xmm0, Operand(rsp, kPointerSize));
8814  __ jmp(&return_result);
8815
8816  // If result is valid, return that.
8817  __ bind(&valid_result);
8818  __ fstp_d(Operand(rsp, kPointerSize));
8819  __ movsd(xmm0, Operand(rsp, kPointerSize));
8820
8821  // Clean up the FPU stack and exceptions, and return the result in xmm0.
8822  __ bind(&return_result);
8823  __ fstp(0);  // Unload y.
8824
8825  Label clear_exceptions;
8826  __ testb(rax, Immediate(0x3f /* Any Exception */));
8827  __ j(not_zero, &clear_exceptions);
8828  __ ret(0);
8829  __ bind(&clear_exceptions);
8830  __ fnclex();
8831  __ ret(0);
8832
8833  CodeDesc desc;
8834  masm.GetCode(&desc);
8835  // Return the generated code as a function pointer callable from C++.
8836  return FUNCTION_CAST<ModuloFunction>(buffer);
8837}
8838
8839#endif
8840
8841
8842#undef __
8843
8844} }  // namespace v8::internal
8845
8846#endif  // V8_TARGET_ARCH_X64
8847