codegen-x64.cc revision d91b9f7d46489a9ee00f9cb415630299c76a502b
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "scopes.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm_)

// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.

void DeferredCode::SaveRegisters() {
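  // Each registers_[i] entry is either kPush (save the register by
  // pushing it), kIgnore (not live, nothing to save), or an rbp-relative
  // byte offset naming the frame slot to store it in; the kSyncedFlag
  // bit marks entries whose frame slot is already up to date.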
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    int action = registers_[i];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
    }
  }
}


void DeferredCode::RestoreRegisters() {
  // Restore registers in reverse order, so that the pops mirror the
  // pushes done in SaveRegisters.
  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
    int action = registers_[i];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(i));
    } else if (action != kIgnore) {
      action &= ~kSyncedFlag;
      __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
    }
  }
}


// -------------------------------------------------------------------------
// CodeGenState implementation.

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner),
      destination_(NULL),
      previous_(NULL) {
  owner_->set_state(this);
}


CodeGenState::CodeGenState(CodeGenerator* owner,
                           ControlDestination* destination)
    : owner_(owner),
      destination_(destination),
      previous_(owner->state()) {
  owner_->set_state(this);
}


CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);
  owner_->set_state(previous_);
}


// -------------------------------------------------------------------------
// Deferred code objects
//
// These subclasses of DeferredCode add pieces of code to the end of the
// generated code.  They are branched to from the main body and keep the
// slower code paths out of it.  Many of them call a code stub or a
// runtime function.
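//
// Control typically enters a deferred code object through a conditional
// branch (deferred->Branch(cc)) and rejoins the main code at the label
// bound by deferred->BindExit(), as CheckStack below illustrates.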

class DeferredInlineSmiAdd: public DeferredCode {
 public:
  DeferredInlineSmiAdd(Register dst,
                       Smi* value,
                       OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiAdd");
  }

  virtual void Generate();

 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


// The result of value + src is in dst.  It either overflowed or was not
// smi tagged.  Undo the speculative addition and call the appropriate
// specialized stub for add.  The result is left in dst.
class DeferredInlineSmiAddReversed: public DeferredCode {
 public:
  DeferredInlineSmiAddReversed(Register dst,
                               Smi* value,
                               OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiAddReversed");
  }

  virtual void Generate();

 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


class DeferredInlineSmiSub: public DeferredCode {
 public:
  DeferredInlineSmiSub(Register dst,
                       Smi* value,
                       OverwriteMode overwrite_mode)
      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiSub");
  }

  virtual void Generate();

 private:
  Register dst_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};


// Call the appropriate binary operation stub to compute src op value
// and leave the result in dst.
class DeferredInlineSmiOperation: public DeferredCode {
 public:
  DeferredInlineSmiOperation(Token::Value op,
                             Register dst,
                             Register src,
                             Smi* value,
                             OverwriteMode overwrite_mode)
      : op_(op),
        dst_(dst),
        src_(src),
        value_(value),
        overwrite_mode_(overwrite_mode) {
    set_comment("[ DeferredInlineSmiOperation");
  }

  virtual void Generate();

 private:
  Token::Value op_;
  Register dst_;
  Register src_;
  Smi* value_;
  OverwriteMode overwrite_mode_;
};
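

// A sketched use site for the deferred smi classes above (illustrative
// only; the exact macro-assembler helper and operand names are
// assumptions, not code from this file):
//
//   DeferredInlineSmiAdd* deferred =
//       new DeferredInlineSmiAdd(operand.reg(), smi_value, overwrite_mode);
//   __ SmiAddConstant(operand.reg(), operand.reg(), smi_value,
//                     deferred->entry_label());
//   deferred->BindExit();
//
// The add is attempted inline; on overflow or a non-smi operand control
// branches to the deferred object, which calls the generic stub.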


class FloatingPointHelper : public AllStatic {
 public:
  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand on TOS+1. Returns operand as floating point number on FPU
  // stack.
  static void LoadFloatOperand(MacroAssembler* masm, Register scratch);

  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand in src register. Returns operand as floating point number
  // in XMM register.
  static void LoadFloatOperand(MacroAssembler* masm,
                               Register src,
                               XMMRegister dst);

  // Code pattern for loading floating point values. Input values must
  // be either smi or heap number objects (fp values). Requirements:
  // operand_1 on TOS+1, operand_2 on TOS+2. Returns operands as
  // floating point numbers in XMM registers.
  static void LoadFloatOperands(MacroAssembler* masm,
                                XMMRegister dst1,
                                XMMRegister dst2);

  // Code pattern for loading floating point values onto the fp stack.
  // Input values must be either smi or heap number objects (fp values).
  // Requirements:
  // Register version: operands in registers lhs and rhs.
  // Stack version: operands on TOS+1 and TOS+2.
  // Returns operands as floating point numbers on fp stack.
  static void LoadFloatOperands(MacroAssembler* masm);
  static void LoadFloatOperands(MacroAssembler* masm,
                                Register lhs,
                                Register rhs);

  // Test if operands are smi or number objects (fp). Requirements:
  // operand_1 in rax, operand_2 in rdx; falls through on float or smi
  // operands, jumps to the non_float label otherwise.
  static void CheckNumberOperands(MacroAssembler* masm,
                                  Label* non_float);

  // Takes the operands in rdx and rax and loads them as integers in rax
  // and rcx.
  static void LoadAsIntegers(MacroAssembler* masm,
                             bool use_sse3,
                             Label* operand_conversion_failure);
};
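

// A sketched use of the helpers above (illustrative only, assuming the
// SSE2 path and operands already in rax/rdx and on the stack as the
// comments require): comparing two number operands.
//
//   Label non_float;
//   FloatingPointHelper::CheckNumberOperands(masm, &non_float);
//   FloatingPointHelper::LoadFloatOperands(masm, xmm0, xmm1);
//   __ ucomisd(xmm0, xmm1);  // Sets flags for a following jcc.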


// -----------------------------------------------------------------------------
// CodeGenerator implementation.

CodeGenerator::CodeGenerator(int buffer_size,
                             Handle<Script> script,
                             bool is_eval)
    : is_eval_(is_eval),
      script_(script),
      deferred_(8),
      masm_(new MacroAssembler(NULL, buffer_size)),
      scope_(NULL),
      frame_(NULL),
      allocator_(NULL),
      state_(NULL),
      loop_nesting_(0),
      function_return_is_shadowed_(false),
      in_spilled_code_(false) {
}


void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  // Call the runtime to declare the globals.  The inevitable call
  // will sync frame elements to memory anyway, so we do it eagerly to
  // allow us to push the arguments directly into place.
  frame_->SyncRange(0, frame_->element_count() - 1);

  __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
  frame_->EmitPush(rsi);  // The context is the first argument.
  frame_->EmitPush(kScratchRegister);
  frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
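  // The stack now holds the three runtime arguments: the context, the
  // fixed array of name/value pairs, and the smi-tagged is_eval flag.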
  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
  // Return value is ignored.
}


void CodeGenerator::GenCode(FunctionLiteral* function) {
  // Record the position for debugging purposes.
  CodeForFunctionPosition(function);
  ZoneList<Statement*>* body = function->body();

  // Initialize state.
  ASSERT(scope_ == NULL);
  scope_ = function->scope();
  ASSERT(allocator_ == NULL);
  RegisterAllocator register_allocator(this);
  allocator_ = &register_allocator;
  ASSERT(frame_ == NULL);
  frame_ = new VirtualFrame();
  set_in_spilled_code(false);

  // Adjust for function-level loop nesting.
  loop_nesting_ += function->loop_nesting();

  JumpTarget::set_compiling_deferred_code(false);

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      function->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    frame_->SpillAll();
    __ int3();
  }
#endif

  // New scope to get automatic timing calculation.
  {  // NOLINT
    HistogramTimerScope codegen_timer(&Counters::code_generation);
    CodeGenState state(this);

    // Entry:
    // Stack: receiver, arguments, return address.
    // rbp: caller's frame pointer
    // rsp: stack pointer
    // rdi: called JS function
    // rsi: callee's context
    allocator_->Initialize();
    frame_->Enter();
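    // After Enter() the standard JS frame is in place: the caller's rbp
    // saved at rbp, with the context (rsi) and the function (rdi) in the
    // slots just below it.  (Frame layout per VirtualFrame::Enter,
    // defined elsewhere.)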

    // Allocate space for locals and initialize them.
    frame_->AllocateStackSlots();
    // Initialize the function return target after the locals are set
    // up, because it needs the expected frame height from the frame.
    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
    function_return_is_shadowed_ = false;

    // Allocate the local context if needed.
    int heap_slots = scope_->num_heap_slots();
    if (heap_slots > 0) {
      Comment cmnt(masm_, "[ allocate local context");
      // Allocate local context.
      // Get outer context and create a new context based on it.
      frame_->PushFunction();
      Result context;
      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
        FastNewContextStub stub(heap_slots);
        context = frame_->CallStub(&stub, 1);
      } else {
        context = frame_->CallRuntime(Runtime::kNewContext, 1);
      }

      // Update context local.
      frame_->SaveContextRegister();

      // Verify that the runtime call result and rsi agree.
      if (FLAG_debug_code) {
        __ cmpq(context.reg(), rsi);
        __ Assert(equal, "Runtime::NewContext should end up in rsi");
      }
    }

    // TODO(1241774): Improve this code:
    // 1) only needed if we have a context
    // 2) no need to recompute context ptr every single time
    // 3) don't copy parameter operand code from SlotOperand!
    {
      Comment cmnt2(masm_, "[ copy context parameters into .context");

      // Note that iteration order is relevant here! If the same parameter
      // occurs twice (e.g., function (x, y, x)) and needs to be copied
      // into the context, the value that must end up in the context is
      // the one passed for the last occurrence.  This is a rare case so
      // we don't check for it; instead we rely on the copying order:
      // such a parameter is copied repeatedly into the same context
      // location and thus the last value is what is seen inside the
      // function.
      for (int i = 0; i < scope_->num_parameters(); i++) {
        Variable* par = scope_->parameter(i);
        Slot* slot = par->slot();
        if (slot != NULL && slot->type() == Slot::CONTEXT) {
          // The use of SlotOperand below is safe in unspilled code
          // because the slot is guaranteed to be a context slot.
          //
          // There are no parameters in the global scope.
          ASSERT(!scope_->is_global_scope());
          frame_->PushParameterAt(i);
          Result value = frame_->Pop();
          value.ToRegister();

          // SlotOperand loads context.reg() with the context object
          // being stored to; it is used below in RecordWrite.
          Result context = allocator_->Allocate();
          ASSERT(context.is_valid());
          __ movq(SlotOperand(slot, context.reg()), value.reg());
          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
          Result scratch = allocator_->Allocate();
          ASSERT(scratch.is_valid());
          frame_->Spill(context.reg());
          frame_->Spill(value.reg());
          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
        }
      }
    }

    // Store the arguments object.  This must happen after context
    // initialization because the arguments object may be stored in
    // the context.
    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
      StoreArgumentsObject(true);
    }

    // Initialize ThisFunction reference if present.
    if (scope_->is_function_scope() && scope_->function() != NULL) {
      frame_->Push(Factory::the_hole_value());
      StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
    }

    // Generate code to 'execute' declarations and initialize functions
    // (source elements). In case of an illegal redeclaration we need to
    // handle that instead of processing the declarations.
    if (scope_->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ illegal redeclarations");
      scope_->VisitIllegalRedeclaration(this);
    } else {
      Comment cmnt(masm_, "[ declarations");
      ProcessDeclarations(scope_->declarations());
      // Bail out if a stack-overflow exception occurred when processing
      // declarations.
      if (HasStackOverflow()) return;
    }

    if (FLAG_trace) {
      frame_->CallRuntime(Runtime::kTraceEnter, 0);
      // Ignore the return value.
    }
    CheckStack();

    // Compile the body of the function in a vanilla state. Don't
    // bother compiling all the code if the scope has an illegal
    // redeclaration.
    if (!scope_->HasIllegalRedeclaration()) {
      Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
      bool is_builtin = Bootstrapper::IsActive();
      bool should_trace =
          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
      if (should_trace) {
        frame_->CallRuntime(Runtime::kDebugTrace, 0);
        // Ignore the return value.
      }
#endif
      VisitStatements(body);

      // Handle the return from the function.
      if (has_valid_frame()) {
        // If there is a valid frame, control flow can fall off the end of
        // the body.  In that case there is an implicit return statement.
        ASSERT(!function_return_is_shadowed_);
        CodeForReturnPosition(function);
        frame_->PrepareForReturn();
        Result undefined(Factory::undefined_value());
        if (function_return_.is_bound()) {
          function_return_.Jump(&undefined);
        } else {
          function_return_.Bind(&undefined);
          GenerateReturnSequence(&undefined);
        }
      } else if (function_return_.is_linked()) {
        // If the return target has dangling jumps to it, then we have not
        // yet generated the return sequence.  This can happen when (a)
        // control does not flow off the end of the body so we did not
        // compile an artificial return statement just above, and (b) there
        // are return statements in the body but (c) they are all shadowed.
        Result return_value;
        function_return_.Bind(&return_value);
        GenerateReturnSequence(&return_value);
      }
    }
  }

  // Adjust for function-level loop nesting.
  loop_nesting_ -= function->loop_nesting();

  // Code generation state must be reset.
  ASSERT(state_ == NULL);
  ASSERT(loop_nesting() == 0);
  ASSERT(!function_return_is_shadowed_);
  function_return_.Unuse();
  DeleteFrame();

  // Process any deferred code using the register allocator.
  if (!HasStackOverflow()) {
    HistogramTimerScope deferred_timer(&Counters::deferred_code_generation);
    JumpTarget::set_compiling_deferred_code(true);
    ProcessDeferred();
    JumpTarget::set_compiling_deferred_code(false);
  }

  // There is no need to delete the register allocator, it is a
  // stack-allocated local.
  allocator_ = NULL;
  scope_ = NULL;
}

void CodeGenerator::GenerateReturnSequence(Result* return_value) {
  // The return value is a live (but not currently reference counted)
  // reference to rax.  This is safe because the current frame does not
  // contain a reference to rax (it is prepared for the return by spilling
  // all registers).
  if (FLAG_trace) {
    frame_->Push(return_value);
    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
  }
  return_value->ToRegister(rax);

  // Add a label for checking the size of the code used for returning.
#ifdef DEBUG
  Label check_exit_codesize;
  masm_->bind(&check_exit_codesize);
#endif

  // Leave the frame and return popping the arguments and the
  // receiver.
  frame_->Exit();
  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
#ifdef ENABLE_DEBUGGER_SUPPORT
  // Add padding that will be overwritten by a debugger breakpoint.
  // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
  // with length 7 (3 + 1 + 3).
  const int kPadding = Assembler::kJSReturnSequenceLength - 7;
  for (int i = 0; i < kPadding; ++i) {
    masm_->int3();
  }
  // Check that the size of the code used for returning matches what is
  // expected by the debugger.
  ASSERT_EQ(Assembler::kJSReturnSequenceLength,
            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
  DeleteFrame();
}


#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() {
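  // Each allocatable register must be accounted for exactly: referenced
  // once by the allocator if the frame uses it, and not at all otherwise.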
  return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
      && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
      && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
      && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
      && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
      && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
      && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
      && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
      && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
      && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
      && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
#endif


class DeferredReferenceGetKeyedValue: public DeferredCode {
 public:
  DeferredReferenceGetKeyedValue(Register dst,
                                 Register receiver,
                                 Register key,
                                 bool is_global)
      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
    set_comment("[ DeferredReferenceGetKeyedValue");
  }

  virtual void Generate();

  Label* patch_site() { return &patch_site_; }

 private:
  Label patch_site_;
  Register dst_;
  Register receiver_;
  Register key_;
  bool is_global_;
};


void DeferredReferenceGetKeyedValue::Generate() {
  __ push(receiver_);  // First IC argument.
  __ push(key_);       // Second IC argument.

  // Calculate the delta from the IC call instruction to the map check
  // movq instruction in the inlined version.  This delta is stored in
  // a test(rax, delta) instruction after the call so that we can find
  // it in the IC initialization code and patch the movq instruction.
  // This means that we cannot allow test instructions after calls to
  // KeyedLoadIC stubs in other places.
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
  RelocInfo::Mode mode = is_global_
                         ? RelocInfo::CODE_TARGET_CONTEXT
                         : RelocInfo::CODE_TARGET;
  __ Call(ic, mode);
  // The delta from the start of the map-compare instruction to the
  // test instruction.  We use masm_-> directly here instead of the __
  // macro because the macro sometimes uses macro expansion to turn
  // into something that can't return a value.  This is encountered
  // when doing generated code coverage tests.
  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
  // Here we use masm_-> instead of the __ macro because this is the
  // instruction that gets patched and coverage code gets in the way.
  // TODO(X64): Consider whether it's worth switching the test to a
  // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
  // be generated normally.
  masm_->testl(rax, Immediate(-delta_to_patch_site));
  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);

  if (!dst_.is(rax)) __ movq(dst_, rax);
  __ pop(key_);
  __ pop(receiver_);
}


class DeferredReferenceSetKeyedValue: public DeferredCode {
 public:
  DeferredReferenceSetKeyedValue(Register value,
                                 Register key,
                                 Register receiver)
      : value_(value), key_(key), receiver_(receiver) {
    set_comment("[ DeferredReferenceSetKeyedValue");
  }

  virtual void Generate();

  Label* patch_site() { return &patch_site_; }

 private:
  Register value_;
  Register key_;
  Register receiver_;
  Label patch_site_;
};


void DeferredReferenceSetKeyedValue::Generate() {
  __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
  // Push receiver and key arguments on the stack.
  __ push(receiver_);
  __ push(key_);
  // Move value argument to rax as expected by the IC stub.
  if (!value_.is(rax)) __ movq(rax, value_);
  // Call the IC stub.
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
  __ Call(ic, RelocInfo::CODE_TARGET);
  // The delta from the start of the map-compare instructions (initial movq)
  // to the test instruction.  We use masm_-> directly here instead of the
  // __ macro because the macro sometimes uses macro expansion to turn
  // into something that can't return a value.  This is encountered
  // when doing generated code coverage tests.
  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
  // Here we use masm_-> instead of the __ macro because this is the
  // instruction that gets patched and coverage code gets in the way.
  masm_->testl(rax, Immediate(-delta_to_patch_site));
  // Restore value (returned from store IC), key and receiver
  // registers.
  if (!value_.is(rax)) __ movq(value_, rax);
  __ pop(key_);
  __ pop(receiver_);
}


void CodeGenerator::CallApplyLazy(Expression* applicand,
                                  Expression* receiver,
                                  VariableProxy* arguments,
                                  int position) {
  // An optimized implementation of expressions of the form
  // x.apply(y, arguments).
  // If the arguments object of the scope has not been allocated,
  // and x.apply is Function.prototype.apply, this optimization
  // just copies y and the arguments of the current function on the
  // stack, as receiver and arguments, and calls x.
  // In the implementation comments, we call x the applicand
  // and y the receiver.
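  //
  // For example, in
  //   function f() { return foo.apply(obj, arguments); }
  // foo is the applicand and obj the receiver: the optimization pushes
  // obj and f's own arguments and calls foo directly, without
  // materializing an arguments object.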
  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
  ASSERT(arguments->IsArguments());

  // Load applicand.apply onto the stack. This will usually
  // give us a megamorphic load site. Not super, but it works.
  Load(applicand);
  Handle<String> name = Factory::LookupAsciiSymbol("apply");
  frame()->Push(name);
  Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
  __ nop();
  frame()->Push(&answer);

  // Load the receiver and the existing arguments object onto the
  // expression stack. Avoid allocating the arguments object here.
  Load(receiver);
  LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);

  // Emit the source position information after having loaded the
  // receiver and the arguments.
  CodeForSourcePosition(position);
  // Contents of frame at this point:
  // Frame[0]: arguments object of the current function or the hole.
  // Frame[1]: receiver
  // Frame[2]: applicand.apply
  // Frame[3]: applicand.

  // Check if the arguments object has been lazily allocated
  // already. If so, just use that instead of copying the arguments
  // from the stack. This also deals with cases where a local variable
  // named 'arguments' has been introduced.
  frame_->Dup();
  Result probe = frame_->Pop();
  { VirtualFrame::SpilledScope spilled_scope;
    Label slow, done;
    bool try_lazy = true;
    if (probe.is_constant()) {
      try_lazy = probe.handle()->IsTheHole();
    } else {
      __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
      probe.Unuse();
      __ j(not_equal, &slow);
    }

    if (try_lazy) {
      Label build_args;
      // Get rid of the arguments object probe.
      frame_->Drop();  // Can be called on a spilled frame.
      // Stack now has 3 elements on it.
      // Contents of stack at this point:
      // rsp[0]: receiver
      // rsp[1]: applicand.apply
      // rsp[2]: applicand.

      // Check that the receiver really is a JavaScript object.
      __ movq(rax, Operand(rsp, 0));
      Condition is_smi = masm_->CheckSmi(rax);
      __ j(is_smi, &build_args);
      // We allow all JSObjects including JSFunctions.  As long as
      // JS_FUNCTION_TYPE is the last instance type and it is right
      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
      // bound.
      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
      ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
      __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
      __ j(below, &build_args);

      // Check that applicand.apply is Function.prototype.apply.
      __ movq(rax, Operand(rsp, kPointerSize));
      is_smi = masm_->CheckSmi(rax);
      __ j(is_smi, &build_args);
      __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
      __ j(not_equal, &build_args);
      __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
      Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
      __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
      __ j(not_equal, &build_args);

      // Check that applicand is a function.
      __ movq(rdi, Operand(rsp, 2 * kPointerSize));
      is_smi = masm_->CheckSmi(rdi);
      __ j(is_smi, &build_args);
      __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
      __ j(not_equal, &build_args);

      // Copy the arguments to this function possibly from the
      // adaptor frame below it.
      Label invoke, adapted;
      __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
      __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
                    Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
      __ j(equal, &adapted);

      // No arguments adaptor frame. Copy fixed number of arguments.
      __ movq(rax, Immediate(scope_->num_parameters()));
      for (int i = 0; i < scope_->num_parameters(); i++) {
        __ push(frame_->ParameterAt(i));
      }
      __ jmp(&invoke);

      // Arguments adaptor frame present. Copy arguments from there, but
      // avoid copying too many arguments to avoid stack overflows.
      __ bind(&adapted);
      static const uint32_t kArgumentsLimit = 1 * KB;
      __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
      __ SmiToInteger32(rax, rax);
      __ movq(rcx, rax);
      __ cmpq(rax, Immediate(kArgumentsLimit));
      __ j(above, &build_args);

      // Loop through the arguments pushing them onto the execution
      // stack. We don't inform the virtual frame of the push, so we don't
      // have to worry about getting rid of the elements from the virtual
      // frame.
      Label loop;
      // rcx is a small non-negative integer, due to the test above.
      __ testl(rcx, rcx);
      __ j(zero, &invoke);
      __ bind(&loop);
      __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
      __ decl(rcx);
      __ j(not_zero, &loop);

      // Invoke the function.
      __ bind(&invoke);
      ParameterCount actual(rax);
      __ InvokeFunction(rdi, actual, CALL_FUNCTION);
      // Drop applicand.apply and applicand from the stack, and push
      // the result of the function call, but leave the spilled frame
      // unchanged, with 3 elements, so it is correct when we compile the
      // slow-case code.
      __ addq(rsp, Immediate(2 * kPointerSize));
      __ push(rax);
      // Stack now has 1 element:
      //   rsp[0]: result
      __ jmp(&done);

      // Slow-case: Allocate the arguments object since we know it isn't
      // there, and fall-through to the slow-case where we call
      // applicand.apply.
      __ bind(&build_args);
      // Stack now has 3 elements, matching the point we jumped here from:
      // rsp[0]: receiver
      // rsp[1]: applicand.apply
      // rsp[2]: applicand.

      // StoreArgumentsObject requires a correct frame, and may modify it.
      Result arguments_object = StoreArgumentsObject(false);
      frame_->SpillAll();
      arguments_object.ToRegister();
      frame_->EmitPush(arguments_object.reg());
      arguments_object.Unuse();
      // Stack and frame now have 4 elements.
      __ bind(&slow);
    }

    // Generic computation of x.apply(y, args) with no special optimization.
    // Flip applicand.apply and applicand on the stack, so
    // applicand looks like the receiver of the applicand.apply call.
    // Then process it as a normal function call.
    __ movq(rax, Operand(rsp, 3 * kPointerSize));
    __ movq(rbx, Operand(rsp, 2 * kPointerSize));
    __ movq(Operand(rsp, 2 * kPointerSize), rax);
    __ movq(Operand(rsp, 3 * kPointerSize), rbx);

    CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
    Result res = frame_->CallStub(&call_function, 3);
    // The function and its two arguments have been dropped.
    frame_->Drop(1);  // Drop the receiver as well.
    res.ToRegister();
    frame_->EmitPush(res.reg());
    // Stack now has 1 element:
    //   rsp[0]: result
    if (try_lazy) __ bind(&done);
  }  // End of spilled scope.
  // Restore the context register after a call.
  frame_->RestoreContextRegister();
}


class DeferredStackCheck: public DeferredCode {
 public:
  DeferredStackCheck() {
    set_comment("[ DeferredStackCheck");
  }

  virtual void Generate();
};


void DeferredStackCheck::Generate() {
  StackCheckStub stub;
  __ CallStub(&stub);
}


void CodeGenerator::CheckStack() {
  DeferredStackCheck* deferred = new DeferredStackCheck;
  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
  deferred->Branch(below);
  deferred->BindExit();
}


void CodeGenerator::VisitAndSpill(Statement* statement) {
  // TODO(X64): No architecture specific code. Move to shared location.
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  Visit(statement);
  if (frame_ != NULL) {
    frame_->SpillAll();
  }
  set_in_spilled_code(true);
}


void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
  ASSERT(in_spilled_code());
  set_in_spilled_code(false);
  VisitStatements(statements);
  if (frame_ != NULL) {
    frame_->SpillAll();
  }
  set_in_spilled_code(true);
}


void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
  ASSERT(!in_spilled_code());
  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
    Visit(statements->at(i));
  }
}


void CodeGenerator::VisitBlock(Block* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ Block");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  VisitStatements(node->statements());
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
}


void CodeGenerator::VisitDeclaration(Declaration* node) {
  Comment cmnt(masm_, "[ Declaration");
  Variable* var = node->proxy()->var();
  ASSERT(var != NULL);  // must have been resolved
  Slot* slot = var->slot();

  // If it was not possible to allocate the variable at compile time,
  // we need to "declare" it at runtime to make sure it actually
  // exists in the local context.
  if (slot != NULL && slot->type() == Slot::LOOKUP) {
    // Variables with a "LOOKUP" slot were introduced as non-locals
    // during variable resolution and must have mode DYNAMIC.
    ASSERT(var->is_dynamic());
    // For now, just do a runtime call.  Sync the virtual frame eagerly
    // so we can simply push the arguments into place.
    frame_->SyncRange(0, frame_->element_count() - 1);
    frame_->EmitPush(rsi);
    __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
    frame_->EmitPush(kScratchRegister);
    // Declaration nodes are always introduced in one of two modes.
    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
    frame_->EmitPush(Smi::FromInt(attr));
    // Push initial value, if any.
    // Note: For variables we must not push an initial value (such as
    // 'undefined') because we may have a (legal) redeclaration and we
    // must not destroy the current value.
    if (node->mode() == Variable::CONST) {
      frame_->EmitPush(Heap::kTheHoleValueRootIndex);
    } else if (node->fun() != NULL) {
      Load(node->fun());
    } else {
      frame_->EmitPush(Smi::FromInt(0));  // no initial value!
    }
    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
    // Ignore the return value (declarations are statements).
    return;
  }

  ASSERT(!var->is_global());

  // If we have a function or a constant, we need to initialize the variable.
  Expression* val = NULL;
  if (node->mode() == Variable::CONST) {
    val = new Literal(Factory::the_hole_value());
  } else {
    val = node->fun();  // NULL if we don't have a function
  }

  if (val != NULL) {
    {
      // Set the initial value.
      Reference target(this, node->proxy());
      Load(val);
      target.SetValue(NOT_CONST_INIT);
      // The reference is removed from the stack (preserving TOS) when
      // it goes out of scope.
    }
    // Get rid of the assigned value (declarations are statements).
    frame_->Drop();
  }
}


void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ExpressionStatement");
  CodeForStatementPosition(node);
  Expression* expression = node->expression();
  expression->MarkAsStatement();
  Load(expression);
  // Remove the lingering expression result from the top of stack.
  frame_->Drop();
}


void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "// EmptyStatement");
  CodeForStatementPosition(node);
  // nothing to do
}


void CodeGenerator::VisitIfStatement(IfStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ IfStatement");
  // Generate different code depending on which parts of the if statement
  // are present or not.
  bool has_then_stm = node->HasThenStatement();
  bool has_else_stm = node->HasElseStatement();
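  // Four cases follow: then and else, then only, else only, and neither
  // (where only the condition's side effects matter).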

  CodeForStatementPosition(node);
  JumpTarget exit;
  if (has_then_stm && has_else_stm) {
    JumpTarget then;
    JumpTarget else_;
    ControlDestination dest(&then, &else_, true);
    LoadCondition(node->condition(), &dest, true);

    if (dest.false_was_fall_through()) {
      // The else target was bound, so we compile the else part first.
      Visit(node->else_statement());

      // We may have dangling jumps to the then part.
      if (then.is_linked()) {
        if (has_valid_frame()) exit.Jump();
        then.Bind();
        Visit(node->then_statement());
      }
    } else {
      // The then target was bound, so we compile the then part first.
      Visit(node->then_statement());

      if (else_.is_linked()) {
        if (has_valid_frame()) exit.Jump();
        else_.Bind();
        Visit(node->else_statement());
      }
    }

  } else if (has_then_stm) {
    ASSERT(!has_else_stm);
    JumpTarget then;
    ControlDestination dest(&then, &exit, true);
    LoadCondition(node->condition(), &dest, true);

    if (dest.false_was_fall_through()) {
      // The exit label was bound.  We may have dangling jumps to the
      // then part.
      if (then.is_linked()) {
        exit.Unuse();
        exit.Jump();
        then.Bind();
        Visit(node->then_statement());
      }
    } else {
      // The then label was bound.
      Visit(node->then_statement());
    }

  } else if (has_else_stm) {
    ASSERT(!has_then_stm);
    JumpTarget else_;
    ControlDestination dest(&exit, &else_, false);
    LoadCondition(node->condition(), &dest, true);

    if (dest.true_was_fall_through()) {
      // The exit label was bound.  We may have dangling jumps to the
      // else part.
      if (else_.is_linked()) {
        exit.Unuse();
        exit.Jump();
        else_.Bind();
        Visit(node->else_statement());
      }
    } else {
      // The else label was bound.
      Visit(node->else_statement());
    }

  } else {
    ASSERT(!has_then_stm && !has_else_stm);
    // We only care about the condition's side effects (not its value
    // or control flow effect).  LoadCondition is called without
    // forcing control flow.
    ControlDestination dest(&exit, &exit, true);
    LoadCondition(node->condition(), &dest, false);
    if (!dest.is_used()) {
      // We got a value on the frame rather than (or in addition to)
      // control flow.
      frame_->Drop();
    }
  }

  if (exit.is_linked()) {
    exit.Bind();
  }
}


void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ContinueStatement");
  CodeForStatementPosition(node);
  node->target()->continue_target()->Jump();
}


void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ BreakStatement");
  CodeForStatementPosition(node);
  node->target()->break_target()->Jump();
}


void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ ReturnStatement");

  CodeForStatementPosition(node);
  Load(node->expression());
  Result return_value = frame_->Pop();
  if (function_return_is_shadowed_) {
    function_return_.Jump(&return_value);
  } else {
    frame_->PrepareForReturn();
    if (function_return_.is_bound()) {
      // If the function return label is already bound we reuse the
      // code by jumping to the return site.
      function_return_.Jump(&return_value);
    } else {
      function_return_.Bind(&return_value);
      GenerateReturnSequence(&return_value);
    }
  }
}


void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ WithEnterStatement");
  CodeForStatementPosition(node);
  Load(node->expression());
  Result context;
  if (node->is_catch_block()) {
    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
  } else {
    context = frame_->CallRuntime(Runtime::kPushContext, 1);
  }

  // Update context local.
  frame_->SaveContextRegister();

  // Verify that the runtime call result and rsi agree.
  if (FLAG_debug_code) {
    __ cmpq(context.reg(), rsi);
    __ Assert(equal, "Runtime::PushContext should end up in rsi");
  }
}


void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ WithExitStatement");
  CodeForStatementPosition(node);
  // Pop context.
  __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
  // Update context local.
  frame_->SaveContextRegister();
}


void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
  // TODO(X64): This code is completely generic and should be moved somewhere
  // where it can be shared between architectures.
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ SwitchStatement");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);

  // Compile the switch value.
  Load(node->tag());
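  // The switch value stays on the frame throughout the tests: each
  // comparison below works on a duplicate (frame_->Dup()) and the value
  // is dropped on the path into a body or the default clause.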

  ZoneList<CaseClause*>* cases = node->cases();
  int length = cases->length();
  CaseClause* default_clause = NULL;

  JumpTarget next_test;
  // Compile the case label expressions and comparisons.  Exit early
  // if a comparison is unconditionally true.  The target next_test is
  // bound before the loop in order to indicate control flow to the
  // first comparison.
  next_test.Bind();
  for (int i = 0; i < length && !next_test.is_unused(); i++) {
    CaseClause* clause = cases->at(i);
    // The default is not a test, but remember it for later.
    if (clause->is_default()) {
      default_clause = clause;
      continue;
    }

    Comment cmnt(masm_, "[ Case comparison");
    // We recycle the same target next_test for each test.  Bind it if
    // the previous test has not done so and then unuse it for the
    // loop.
    if (next_test.is_linked()) {
      next_test.Bind();
    }
    next_test.Unuse();

    // Duplicate the switch value.
    frame_->Dup();

    // Compile the label expression.
    Load(clause->label());

    // Compare and branch to the body if true or the next test if
    // false.  Prefer the next test as a fall through.
    ControlDestination dest(clause->body_target(), &next_test, false);
    Comparison(equal, true, &dest);

    // If the comparison fell through to the true target, jump to the
    // actual body.
    if (dest.true_was_fall_through()) {
      clause->body_target()->Unuse();
      clause->body_target()->Jump();
    }
  }

  // If there was control flow to a next test from the last one
  // compiled, compile a jump to the default or break target.
  if (!next_test.is_unused()) {
    if (next_test.is_linked()) {
      next_test.Bind();
    }
    // Drop the switch value.
    frame_->Drop();
    if (default_clause != NULL) {
      default_clause->body_target()->Jump();
    } else {
      node->break_target()->Jump();
    }
  }

  // The last instruction emitted was a jump, either to the default
  // clause or the break target, or else to a case body from the loop
  // that compiles the tests.
  ASSERT(!has_valid_frame());
  // Compile case bodies as needed.
  for (int i = 0; i < length; i++) {
    CaseClause* clause = cases->at(i);

    // There are two ways to reach the body: from the corresponding
    // test or as the fall through of the previous body.
    if (clause->body_target()->is_linked() || has_valid_frame()) {
      if (clause->body_target()->is_linked()) {
        if (has_valid_frame()) {
          // If we have both a jump to the test and a fall through, put
          // a jump on the fall through path to avoid the dropping of
          // the switch value on the test path.  The exception is the
          // default which has already had the switch value dropped.
          if (clause->is_default()) {
            clause->body_target()->Bind();
          } else {
            JumpTarget body;
            body.Jump();
            clause->body_target()->Bind();
            frame_->Drop();
            body.Bind();
          }
        } else {
          // No fall through to worry about.
          clause->body_target()->Bind();
          if (!clause->is_default()) {
            frame_->Drop();
          }
        }
      } else {
        // Otherwise, we have only fall through.
        ASSERT(has_valid_frame());
      }

      // We are now prepared to compile the body.
      Comment cmnt(masm_, "[ Case body");
      VisitStatements(clause->statements());
    }
    clause->body_target()->Unuse();
  }

  // We may not have a valid frame here so bind the break target only
  // if needed.
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  node->break_target()->Unuse();
}


void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ DoWhileStatement");
  CodeForStatementPosition(node);
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  JumpTarget body(JumpTarget::BIDIRECTIONAL);
  IncrementLoopNesting();

  ConditionAnalysis info = AnalyzeCondition(node->cond());
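  // AnalyzeCondition folds constant conditions: ALWAYS_TRUE and
  // ALWAYS_FALSE let us omit compiling the test, while DONT_KNOW means
  // the test expression must be compiled.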
  // Label the top of the loop for the backward jump if necessary.
  switch (info) {
    case ALWAYS_TRUE:
      // Use the continue target.
      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
      node->continue_target()->Bind();
      break;
    case ALWAYS_FALSE:
      // No need to label it.
      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
      break;
    case DONT_KNOW:
      // Continue is the test, so use the backward body target.
      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
      body.Bind();
      break;
  }

  CheckStack();  // TODO(1222600): ignore if body contains calls.
  Visit(node->body());

  // Compile the test.
  switch (info) {
    case ALWAYS_TRUE:
      // If control flow can fall off the end of the body, jump back
      // to the top and bind the break target at the exit.
      if (has_valid_frame()) {
        node->continue_target()->Jump();
      }
      if (node->break_target()->is_linked()) {
        node->break_target()->Bind();
      }
      break;
    case ALWAYS_FALSE:
      // We may have had continues or breaks in the body.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      if (node->break_target()->is_linked()) {
        node->break_target()->Bind();
      }
      break;
    case DONT_KNOW:
      // We have to compile the test expression if it can be reached by
      // control flow falling out of the body or via continue.
      if (node->continue_target()->is_linked()) {
        node->continue_target()->Bind();
      }
      if (has_valid_frame()) {
        Comment cmnt(masm_, "[ DoWhileCondition");
        CodeForDoWhileConditionPosition(node);
        ControlDestination dest(&body, node->break_target(), false);
        LoadCondition(node->cond(), &dest, true);
      }
      if (node->break_target()->is_linked()) {
        node->break_target()->Bind();
      }
      break;
  }

  DecrementLoopNesting();
  node->continue_target()->Unuse();
  node->break_target()->Unuse();
}


void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ WhileStatement");
  CodeForStatementPosition(node);

  // If the condition is always false and has no side effects, we do not
  // need to compile anything.
  ConditionAnalysis info = AnalyzeCondition(node->cond());
  if (info == ALWAYS_FALSE) return;

  // Do not duplicate conditions that may have function literal
  // subexpressions.  This can cause us to compile the function literal
  // twice.
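  // For example, a condition such as  g(function () { ... })  is only
  // compiled once, at the top of the loop.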
  bool test_at_bottom = !node->may_have_function_literal();
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  IncrementLoopNesting();
  JumpTarget body;
  if (test_at_bottom) {
    body.set_direction(JumpTarget::BIDIRECTIONAL);
  }

  // Based on the condition analysis, compile the test as necessary.
  switch (info) {
    case ALWAYS_TRUE:
      // We will not compile the test expression.  Label the top of the
      // loop with the continue target.
      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
      node->continue_target()->Bind();
      break;
    case DONT_KNOW: {
      if (test_at_bottom) {
        // Continue is the test at the bottom, no need to label the test
        // at the top.  The body is a backward target.
        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
      } else {
        // Label the test at the top as the continue target.  The body
        // is a forward-only target.
        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
        node->continue_target()->Bind();
      }
      // Compile the test with the body as the true target and preferred
      // fall-through and with the break target as the false target.
      ControlDestination dest(&body, node->break_target(), true);
      LoadCondition(node->cond(), &dest, true);

      if (dest.false_was_fall_through()) {
        // If we got the break target as fall-through, the test may have
        // been unconditionally false (if there are no jumps to the
        // body).
        if (!body.is_linked()) {
          DecrementLoopNesting();
          return;
        }

        // Otherwise, jump around the body on the fall through and then
        // bind the body target.
        node->break_target()->Unuse();
        node->break_target()->Jump();
        body.Bind();
      }
      break;
    }
    case ALWAYS_FALSE:
      UNREACHABLE();
      break;
  }

  CheckStack();  // TODO(1222600): ignore if body contains calls.
  Visit(node->body());

  // Based on the condition analysis, compile the backward jump as
  // necessary.
  switch (info) {
    case ALWAYS_TRUE:
      // The loop body has been labeled with the continue target.
      if (has_valid_frame()) {
        node->continue_target()->Jump();
      }
      break;
    case DONT_KNOW:
      if (test_at_bottom) {
        // If we have chosen to recompile the test at the bottom,
        // then it is the continue target.
        if (node->continue_target()->is_linked()) {
          node->continue_target()->Bind();
        }
        if (has_valid_frame()) {
          // The break target is the fall-through (body is a backward
          // jump from here and thus an invalid fall-through).
          ControlDestination dest(&body, node->break_target(), false);
          LoadCondition(node->cond(), &dest, true);
        }
      } else {
        // If we have chosen not to recompile the test at the
        // bottom, jump back to the one at the top.
        if (has_valid_frame()) {
          node->continue_target()->Jump();
        }
      }
      break;
    case ALWAYS_FALSE:
      UNREACHABLE();
      break;
  }

  // The break target may be already bound (by the condition), or there
  // may not be a valid frame.  Bind it only if needed.
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  DecrementLoopNesting();
}
1480
1481
1482void CodeGenerator::VisitForStatement(ForStatement* node) {
1483  ASSERT(!in_spilled_code());
1484  Comment cmnt(masm_, "[ ForStatement");
1485  CodeForStatementPosition(node);
1486
1487  // Compile the init expression if present.
1488  if (node->init() != NULL) {
1489    Visit(node->init());
1490  }
1491
1492  // If the condition is always false and has no side effects, we do not
1493  // need to compile anything else.
1494  ConditionAnalysis info = AnalyzeCondition(node->cond());
1495  if (info == ALWAYS_FALSE) return;
1496
1497  // Do not duplicate conditions that may have function literal
1498  // subexpressions.  This can cause us to compile the function literal
1499  // twice.
  bool test_at_bottom = !node->may_have_function_literal();
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  IncrementLoopNesting();

  // Target for backward edge if no test at the bottom, otherwise
  // unused.
  JumpTarget loop(JumpTarget::BIDIRECTIONAL);

  // Target for backward edge if there is a test at the bottom,
  // otherwise used as target for test at the top.
  JumpTarget body;
  if (test_at_bottom) {
    body.set_direction(JumpTarget::BIDIRECTIONAL);
  }

  // Based on the condition analysis, compile the test as necessary.
  switch (info) {
    case ALWAYS_TRUE:
      // We will not compile the test expression.  Label the top of the
      // loop.
      if (node->next() == NULL) {
        // Use the continue target if there is no update expression.
        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
        node->continue_target()->Bind();
      } else {
        // Otherwise use the backward loop target.
        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
        loop.Bind();
      }
      break;
    case DONT_KNOW: {
      if (test_at_bottom) {
        // Continue is either the update expression or the test at the
        // bottom, so there is no need to label the test at the top.
        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
      } else if (node->next() == NULL) {
        // We are not recompiling the test at the bottom and there is no
        // update expression.
        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
        node->continue_target()->Bind();
      } else {
        // We are not recompiling the test at the bottom and there is an
        // update expression.
        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
        loop.Bind();
      }

      // Compile the test with the body as the true target and preferred
      // fall-through and with the break target as the false target.
      ControlDestination dest(&body, node->break_target(), true);
      LoadCondition(node->cond(), &dest, true);

      if (dest.false_was_fall_through()) {
        // If we got the break target as fall-through, the test may have
        // been unconditionally false (if there are no jumps to the
        // body).
        if (!body.is_linked()) {
          DecrementLoopNesting();
          return;
        }

        // Otherwise, jump around the body on the fall through and then
        // bind the body target.
        node->break_target()->Unuse();
        node->break_target()->Jump();
        body.Bind();
      }
      break;
    }
    case ALWAYS_FALSE:
      UNREACHABLE();
      break;
  }

  CheckStack();  // TODO(1222600): ignore if body contains calls.
  Visit(node->body());

  // If there is an update expression, compile it if necessary.
  if (node->next() != NULL) {
    if (node->continue_target()->is_linked()) {
      node->continue_target()->Bind();
    }

    // Control can reach the update by falling out of the body or by a
    // continue.
    if (has_valid_frame()) {
      // Record the source position of the statement: although this code
      // comes after the code for the body, it belongs to the loop
      // statement and not to the body.
      CodeForStatementPosition(node);
      Visit(node->next());
    }
  }

  // Based on the condition analysis, compile the backward jump as
  // necessary.
  switch (info) {
    case ALWAYS_TRUE:
      if (has_valid_frame()) {
        if (node->next() == NULL) {
          node->continue_target()->Jump();
        } else {
          loop.Jump();
        }
      }
      break;
    case DONT_KNOW:
      if (test_at_bottom) {
        if (node->continue_target()->is_linked()) {
          // We can have dangling jumps to the continue target if there
          // was no update expression.
          node->continue_target()->Bind();
        }
        // Control can reach the test at the bottom by falling out of
        // the body, by a continue in the body, or from the update
        // expression.
        if (has_valid_frame()) {
          // The break target is the fall-through (body is a backward
          // jump from here).
          ControlDestination dest(&body, node->break_target(), false);
          LoadCondition(node->cond(), &dest, true);
        }
      } else {
        // Otherwise, jump back to the test at the top.
        if (has_valid_frame()) {
          if (node->next() == NULL) {
            node->continue_target()->Jump();
          } else {
            loop.Jump();
          }
        }
      }
      break;
    case ALWAYS_FALSE:
      UNREACHABLE();
      break;
  }

  // The break target may already be bound (by the condition), or there
  // may not be a valid frame.  Bind it only if needed.
  if (node->break_target()->is_linked()) {
    node->break_target()->Bind();
  }
  DecrementLoopNesting();
}


void CodeGenerator::VisitForInStatement(ForInStatement* node) {
  ASSERT(!in_spilled_code());
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ ForInStatement");
  CodeForStatementPosition(node);

  JumpTarget primitive;
  JumpTarget jsobject;
  JumpTarget fixed_array;
  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
  JumpTarget end_del_check;
  JumpTarget exit;

  // Get the object to enumerate over (converted to JSObject).
  LoadAndSpill(node->enumerable());

  // Both SpiderMonkey and kjs ignore null and undefined in contrast
  // to the specification; section 12.6.4 mandates a call to ToObject.
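  // JavaScript example (hypothetical input): 'for (var p in null) { body }'
  // therefore executes the body zero times rather than failing.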
  frame_->EmitPop(rax);

  // rax: value to be iterated over
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  exit.Branch(equal);
  __ CompareRoot(rax, Heap::kNullValueRootIndex);
  exit.Branch(equal);

  // Stack layout in body:
  // [iteration counter (smi)] <- slot 0
  // [length of array]         <- slot 1
  // [FixedArray]              <- slot 2
  // [Map or 0]                <- slot 3
  // [Object]                  <- slot 4

  // Check if enumerable is already a JSObject
  // rax: value to be iterated over
  Condition is_smi = masm_->CheckSmi(rax);
  primitive.Branch(is_smi);
  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
  jsobject.Branch(above_equal);

  primitive.Bind();
  frame_->EmitPush(rax);
  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
  // function call returns the value in rax, which is where we want it below

  jsobject.Bind();
  // Get the set of properties (as a FixedArray or Map).
  // rax: value to be iterated over
  frame_->EmitPush(rax);  // Push the object being iterated over.

  // Check cache validity in generated code. This is a fast case for
  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
  // guarantee cache validity, call the runtime system to check cache
  // validity or get the property names in a fixed array.
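  // For example, iterating over a simple object literal such as
  // '{a: 1, b: 2}' (a hypothetical input) may pass the checks below and
  // use the enum cache directly, avoiding the runtime call.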
  JumpTarget call_runtime;
  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
  JumpTarget check_prototype;
  JumpTarget use_cache;
  __ movq(rcx, rax);
  loop.Bind();
  // Check that there are no elements.
  __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
  call_runtime.Branch(not_equal);
  // Check that instance descriptors are not empty so that we can
  // check for an enum cache.  Leave the map in rbx for the subsequent
  // prototype load.
  __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
  __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
  call_runtime.Branch(equal);
  // Check that there is an enum cache in the non-empty instance
  // descriptors.  This is the case if the next enumeration index
  // field does not contain a smi.
  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
  is_smi = masm_->CheckSmi(rdx);
  call_runtime.Branch(is_smi);
  // For all objects but the receiver, check that the cache is empty.
  __ cmpq(rcx, rax);
  check_prototype.Branch(equal);
  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
  call_runtime.Branch(not_equal);
  check_prototype.Bind();
  // Load the prototype from the map and loop if non-null.
  __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  __ CompareRoot(rcx, Heap::kNullValueRootIndex);
  loop.Branch(not_equal);
  // The enum cache is valid.  Load the map of the object being
  // iterated over and use the cache for the iteration.
  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
  use_cache.Jump();

  call_runtime.Bind();
  // Call the runtime to get the property names for the object.
  frame_->EmitPush(rax);  // push the Object (slot 4) for the runtime call
  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);

  // If we got a Map, we can do a fast modification check.
  // Otherwise, we got a FixedArray, and we have to do a slow check.
  // rax: map or fixed array (result from call to
  // Runtime::kGetPropertyNamesFast)
  __ movq(rdx, rax);
  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
  __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
  fixed_array.Branch(not_equal);

  use_cache.Bind();
  // Get enum cache
  // rax: map (either the result from a call to
  // Runtime::kGetPropertyNamesFast or has been fetched directly from
  // the object)
  __ movq(rcx, rax);
  __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
  // Get the bridge array held in the enumeration index field.
  __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
  // Get the cache from the bridge array.
  __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));

  frame_->EmitPush(rax);  // <- slot 3
  frame_->EmitPush(rdx);  // <- slot 2
  __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
  __ Integer32ToSmi(rax, rax);
  frame_->EmitPush(rax);  // <- slot 1
  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
  entry.Jump();

  fixed_array.Bind();
  // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
  frame_->EmitPush(Smi::FromInt(0));  // <- slot 3
  frame_->EmitPush(rax);  // <- slot 2

  // Push the length of the array and the initial index onto the stack.
  __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
  __ Integer32ToSmi(rax, rax);
  frame_->EmitPush(rax);  // <- slot 1
  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0

  // Condition.
  entry.Bind();
  // Grab the current frame's height for the break and continue
  // targets only after all the state is pushed on the frame.
  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);

  __ movq(rax, frame_->ElementAt(0));  // load the current count
  __ SmiCompare(frame_->ElementAt(1), rax);  // compare to the array length
  node->break_target()->Branch(below_equal);

  // Get the i'th entry of the array.
  __ movq(rdx, frame_->ElementAt(2));
  SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
  __ movq(rbx,
          FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));

  // Get the expected map from the stack or a zero map in the
  // permanent slow case.
  // rax: current iteration count
  // rbx: i'th entry of the enum cache
  __ movq(rdx, frame_->ElementAt(3));
  // Check if the expected map still matches that of the enumerable.
  // If not, we have to filter the key.
  // rax: current iteration count
  // rbx: i'th entry of the enum cache
  // rdx: expected map value
  __ movq(rcx, frame_->ElementAt(4));
  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
  __ cmpq(rcx, rdx);
  end_del_check.Branch(equal);

  // Convert the entry to a string (or null if it isn't a property anymore).
  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
  frame_->EmitPush(rbx);  // push entry
  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
  __ movq(rbx, rax);

  // If the property has been removed while iterating, we just skip it.
  __ CompareRoot(rbx, Heap::kNullValueRootIndex);
  node->continue_target()->Branch(equal);

  end_del_check.Bind();
  // Store the entry in the 'each' expression and take another spin in the
  // loop.  rbx: i'th entry of the enum cache (or string thereof).
  frame_->EmitPush(rbx);
  { Reference each(this, node->each());
    // Loading a reference may leave the frame in an unspilled state.
    frame_->SpillAll();
    if (!each.is_illegal()) {
      if (each.size() > 0) {
        frame_->EmitPush(frame_->ElementAt(each.size()));
        each.SetValue(NOT_CONST_INIT);
        frame_->Drop(2);  // Drop the original and the copy of the element.
      } else {
        // If the reference has size zero then we can use the value below
        // the reference as if it were above the reference, instead of pushing
        // a new copy of it above the reference.
        each.SetValue(NOT_CONST_INIT);
        frame_->Drop();  // Drop the original of the element.
      }
    }
  }
  // Unloading a reference may leave the frame in an unspilled state.
  frame_->SpillAll();

  // Body.
  CheckStack();  // TODO(1222600): ignore if body contains calls.
  VisitAndSpill(node->body());

  // Next.  Reestablish a spilled frame in case we are coming here via
  // a continue in the body.
  node->continue_target()->Bind();
  frame_->SpillAll();
  frame_->EmitPop(rax);
  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
  frame_->EmitPush(rax);
  entry.Jump();

  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
  // any frame.
  node->break_target()->Bind();
  frame_->Drop(5);

  // Exit.
  exit.Bind();

  node->continue_target()->Unuse();
  node->break_target()->Unuse();
}


void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
  ASSERT(!in_spilled_code());
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ TryCatchStatement");
  CodeForStatementPosition(node);

  JumpTarget try_block;
  JumpTarget exit;

  try_block.Call();
  // --- Catch block ---
  frame_->EmitPush(rax);

  // Store the caught exception in the catch variable.
  Variable* catch_var = node->catch_var()->var();
  ASSERT(catch_var != NULL && catch_var->slot() != NULL);
  StoreToSlot(catch_var->slot(), NOT_CONST_INIT);

  // Remove the exception from the stack.
  frame_->Drop();

  VisitStatementsAndSpill(node->catch_block()->statements());
  if (has_valid_frame()) {
    exit.Jump();
  }

  // --- Try block ---
  try_block.Bind();

  frame_->PushTryHandler(TRY_CATCH_HANDLER);
  int handler_height = frame_->height();

  // Shadow the jump targets for all escapes from the try block, including
  // returns.  During shadowing, the original target is hidden as the
  // ShadowTarget and operations on the original actually affect the
  // shadowing target.
  //
  // We should probably try to unify the escaping targets and the return
  // target.
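  // JavaScript example (hypothetical input):
  //   'outer: for (;;) { try { break outer; } catch (e) { } }'
  // Here 'break outer' escapes the try block, so the loop's break
  // target is among the escaping targets shadowed below.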
  int nof_escapes = node->escaping_targets()->length();
  List<ShadowTarget*> shadows(1 + nof_escapes);

  // Add the shadow target for the function return.
  static const int kReturnShadowIndex = 0;
  shadows.Add(new ShadowTarget(&function_return_));
  bool function_return_was_shadowed = function_return_is_shadowed_;
  function_return_is_shadowed_ = true;
  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);

  // Add the remaining shadow targets.
  for (int i = 0; i < nof_escapes; i++) {
    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
  }

  // Generate code for the statements in the try block.
  VisitStatementsAndSpill(node->try_block()->statements());

  // Stop the introduced shadowing and count the number of required unlinks.
  // After shadowing stops, the original targets are unshadowed and the
  // ShadowTargets represent the formerly shadowing targets.
  bool has_unlinks = false;
  for (int i = 0; i < shadows.length(); i++) {
    shadows[i]->StopShadowing();
    has_unlinks = has_unlinks || shadows[i]->is_linked();
  }
  function_return_is_shadowed_ = function_return_was_shadowed;

  // Get an external reference to the handler address.
  ExternalReference handler_address(Top::k_handler_address);

  // Make sure that there's nothing left on the stack above the
  // handler structure.
  if (FLAG_debug_code) {
    __ movq(kScratchRegister, handler_address);
    __ cmpq(rsp, Operand(kScratchRegister, 0));
    __ Assert(equal, "stack pointer should point to top handler");
  }

  // If we can fall off the end of the try block, unlink from try chain.
  if (has_valid_frame()) {
    // The next handler address is on top of the frame.  Unlink from
    // the handler list and drop the rest of this handler from the
    // frame.
    ASSERT(StackHandlerConstants::kNextOffset == 0);
    __ movq(kScratchRegister, handler_address);
    frame_->EmitPop(Operand(kScratchRegister, 0));
    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
    if (has_unlinks) {
      exit.Jump();
    }
  }

  // Generate unlink code for the (formerly) shadowing targets that
  // have been jumped to.  Deallocate each shadow target.
  Result return_value;
  for (int i = 0; i < shadows.length(); i++) {
    if (shadows[i]->is_linked()) {
      // Unlink from try chain; be careful not to destroy the TOS if
      // there is one.
      if (i == kReturnShadowIndex) {
        shadows[i]->Bind(&return_value);
        return_value.ToRegister(rax);
      } else {
        shadows[i]->Bind();
      }
      // Because we can be jumping here (to spilled code) from
      // unspilled code, we need to reestablish a spilled frame at
      // this block.
      frame_->SpillAll();

      // Reload sp from the top handler, because some statements that we
      // break from (e.g., for...in) may have left stuff on the stack.
      __ movq(kScratchRegister, handler_address);
      __ movq(rsp, Operand(kScratchRegister, 0));
      frame_->Forget(frame_->height() - handler_height);

      ASSERT(StackHandlerConstants::kNextOffset == 0);
      __ movq(kScratchRegister, handler_address);
      frame_->EmitPop(Operand(kScratchRegister, 0));
      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);

      if (i == kReturnShadowIndex) {
        if (!function_return_is_shadowed_) frame_->PrepareForReturn();
        shadows[i]->other_target()->Jump(&return_value);
      } else {
        shadows[i]->other_target()->Jump();
      }
    }
  }

  exit.Bind();
}


void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
  ASSERT(!in_spilled_code());
  VirtualFrame::SpilledScope spilled_scope;
  Comment cmnt(masm_, "[ TryFinallyStatement");
  CodeForStatementPosition(node);

  // State: Used to keep track of reason for entering the finally
  // block. Should probably be extended to hold information for
  // break/continue from within the try block.
  enum { FALLING, THROWING, JUMPING };
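  // For example (hypothetical input), in 'try { return x; } finally { }'
  // the return enters the finally block in the JUMPING state, while
  // falling off the end of the try block enters it in the FALLING state.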

  JumpTarget try_block;
  JumpTarget finally_block;

  try_block.Call();

  frame_->EmitPush(rax);
  // In case of thrown exceptions, this is where we continue.
  __ Move(rcx, Smi::FromInt(THROWING));
  finally_block.Jump();

  // --- Try block ---
  try_block.Bind();

  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
  int handler_height = frame_->height();

  // Shadow the jump targets for all escapes from the try block, including
  // returns.  During shadowing, the original target is hidden as the
  // ShadowTarget and operations on the original actually affect the
  // shadowing target.
  //
  // We should probably try to unify the escaping targets and the return
  // target.
  int nof_escapes = node->escaping_targets()->length();
  List<ShadowTarget*> shadows(1 + nof_escapes);

  // Add the shadow target for the function return.
  static const int kReturnShadowIndex = 0;
  shadows.Add(new ShadowTarget(&function_return_));
  bool function_return_was_shadowed = function_return_is_shadowed_;
  function_return_is_shadowed_ = true;
  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);

  // Add the remaining shadow targets.
  for (int i = 0; i < nof_escapes; i++) {
    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
  }

  // Generate code for the statements in the try block.
  VisitStatementsAndSpill(node->try_block()->statements());

  // Stop the introduced shadowing and count the number of required unlinks.
  // After shadowing stops, the original targets are unshadowed and the
  // ShadowTargets represent the formerly shadowing targets.
  int nof_unlinks = 0;
  for (int i = 0; i < shadows.length(); i++) {
    shadows[i]->StopShadowing();
    if (shadows[i]->is_linked()) nof_unlinks++;
  }
  function_return_is_shadowed_ = function_return_was_shadowed;

  // Get an external reference to the handler address.
  ExternalReference handler_address(Top::k_handler_address);

  // If we can fall off the end of the try block, unlink from the try
  // chain and set the state on the frame to FALLING.
  if (has_valid_frame()) {
    // The next handler address is on top of the frame.
    ASSERT(StackHandlerConstants::kNextOffset == 0);
    __ movq(kScratchRegister, handler_address);
    frame_->EmitPop(Operand(kScratchRegister, 0));
    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);

    // Fake a top of stack value (unneeded when FALLING) and set the
    // state in rcx, then jump around the unlink blocks if any.
    frame_->EmitPush(Heap::kUndefinedValueRootIndex);
    __ Move(rcx, Smi::FromInt(FALLING));
    if (nof_unlinks > 0) {
      finally_block.Jump();
    }
  }

  // Generate code to unlink and set the state for the (formerly)
  // shadowing targets that have been jumped to.
  for (int i = 0; i < shadows.length(); i++) {
    if (shadows[i]->is_linked()) {
      // If we have come from the shadowed return, the return value is
      // on the virtual frame.  We must preserve it until it is
      // pushed.
      if (i == kReturnShadowIndex) {
        Result return_value;
        shadows[i]->Bind(&return_value);
        return_value.ToRegister(rax);
      } else {
        shadows[i]->Bind();
      }
      // Because we can be jumping here (to spilled code) from
      // unspilled code, we need to reestablish a spilled frame at
      // this block.
      frame_->SpillAll();

      // Reload sp from the top handler, because some statements that
      // we break from (e.g., for...in) may have left stuff on the
      // stack.
      __ movq(kScratchRegister, handler_address);
      __ movq(rsp, Operand(kScratchRegister, 0));
      frame_->Forget(frame_->height() - handler_height);

      // Unlink this handler and drop it from the frame.
      ASSERT(StackHandlerConstants::kNextOffset == 0);
      __ movq(kScratchRegister, handler_address);
      frame_->EmitPop(Operand(kScratchRegister, 0));
      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);

      if (i == kReturnShadowIndex) {
        // If this target shadowed the function return, materialize
        // the return value on the stack.
        frame_->EmitPush(rax);
      } else {
        // Fake TOS for targets that shadowed breaks and continues.
        frame_->EmitPush(Heap::kUndefinedValueRootIndex);
      }
      __ Move(rcx, Smi::FromInt(JUMPING + i));
      if (--nof_unlinks > 0) {
        // If this is not the last unlink block, jump around the next.
        finally_block.Jump();
      }
    }
  }

  // --- Finally block ---
  finally_block.Bind();

  // Push the state on the stack.
  frame_->EmitPush(rcx);

  // We keep two elements on the stack - the (possibly faked) result
  // and the state - while evaluating the finally block.
  //
  // Generate code for the statements in the finally block.
  VisitStatementsAndSpill(node->finally_block()->statements());

  if (has_valid_frame()) {
    // Restore state and return value or faked TOS.
    frame_->EmitPop(rcx);
    frame_->EmitPop(rax);
  }

  // Generate code to jump to the right destination for all used
  // formerly shadowing targets.  Deallocate each shadow target.
  for (int i = 0; i < shadows.length(); i++) {
    if (has_valid_frame() && shadows[i]->is_bound()) {
      BreakTarget* original = shadows[i]->other_target();
      __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
      if (i == kReturnShadowIndex) {
        // The return value is (already) in rax.
        Result return_value = allocator_->Allocate(rax);
        ASSERT(return_value.is_valid());
        if (function_return_is_shadowed_) {
          original->Branch(equal, &return_value);
        } else {
          // Branch around the preparation for return which may emit
          // code.
          JumpTarget skip;
          skip.Branch(not_equal);
          frame_->PrepareForReturn();
          original->Jump(&return_value);
          skip.Bind();
        }
      } else {
        original->Branch(equal);
      }
    }
  }

  if (has_valid_frame()) {
    // Check if we need to rethrow the exception.
    JumpTarget exit;
    __ SmiCompare(rcx, Smi::FromInt(THROWING));
    exit.Branch(not_equal);

    // Rethrow exception.
    frame_->EmitPush(rax);  // undo pop from above
    frame_->CallRuntime(Runtime::kReThrow, 1);

    // Done.
    exit.Bind();
  }
}


void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
  ASSERT(!in_spilled_code());
  Comment cmnt(masm_, "[ DebuggerStatement");
  CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
  // Spill everything, even constants, to the frame.
  frame_->SpillAll();
  frame_->CallRuntime(Runtime::kDebugBreak, 0);
  // Ignore the return value.
#endif
}


void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
  ASSERT(boilerplate->IsBoilerplate());

  // The inevitable call will sync frame elements to memory anyway, so
  // we do it eagerly to allow us to push the arguments directly into
  // place.
  frame_->SyncRange(0, frame_->element_count() - 1);

  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
    FastNewClosureStub stub;
    frame_->Push(boilerplate);
    Result answer = frame_->CallStub(&stub, 1);
    frame_->Push(&answer);
  } else {
    // Call the runtime to instantiate the function boilerplate
    // object.
    frame_->EmitPush(rsi);
    frame_->EmitPush(boilerplate);
    Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
    frame_->Push(&result);
  }
}


void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
  Comment cmnt(masm_, "[ FunctionLiteral");

  // Build the function boilerplate and instantiate it.
  Handle<JSFunction> boilerplate =
      Compiler::BuildBoilerplate(node, script_, this);
  // Check for stack-overflow exception.
  if (HasStackOverflow()) return;
  InstantiateBoilerplate(boilerplate);
}


void CodeGenerator::VisitFunctionBoilerplateLiteral(
    FunctionBoilerplateLiteral* node) {
  Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
  InstantiateBoilerplate(node->boilerplate());
}


void CodeGenerator::VisitConditional(Conditional* node) {
  Comment cmnt(masm_, "[ Conditional");
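  // JavaScript example: 'cond ? then_expr : else_expr'.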
  JumpTarget then;
  JumpTarget else_;
  JumpTarget exit;
  ControlDestination dest(&then, &else_, true);
  LoadCondition(node->condition(), &dest, true);

  if (dest.false_was_fall_through()) {
    // The else target was bound, so we compile the else part first.
    Load(node->else_expression());

    if (then.is_linked()) {
      exit.Jump();
      then.Bind();
      Load(node->then_expression());
    }
  } else {
    // The then target was bound, so we compile the then part first.
    Load(node->then_expression());

    if (else_.is_linked()) {
      exit.Jump();
      else_.Bind();
      Load(node->else_expression());
    }
  }

  exit.Bind();
}


void CodeGenerator::VisitSlot(Slot* node) {
  Comment cmnt(masm_, "[ Slot");
  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
}


void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
  Comment cmnt(masm_, "[ VariableProxy");
  Variable* var = node->var();
  Expression* expr = var->rewrite();
  if (expr != NULL) {
    Visit(expr);
  } else {
    ASSERT(var->is_global());
    Reference ref(this, node);
    ref.GetValue();
  }
}


void CodeGenerator::VisitLiteral(Literal* node) {
  Comment cmnt(masm_, "[ Literal");
  frame_->Push(node->handle());
}


// Materialize the regexp literal 'node' in the literals array
// 'literals' of the function.  Leave the regexp boilerplate in
// 'boilerplate'.
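// For example (hypothetical input), the first evaluation of '/ab+c/gi'
// finds an undefined entry in the literals array and takes this
// deferred path to materialize the boilerplate.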
class DeferredRegExpLiteral: public DeferredCode {
 public:
  DeferredRegExpLiteral(Register boilerplate,
                        Register literals,
                        RegExpLiteral* node)
      : boilerplate_(boilerplate), literals_(literals), node_(node) {
    set_comment("[ DeferredRegExpLiteral");
  }

  void Generate();

 private:
  Register boilerplate_;
  Register literals_;
  RegExpLiteral* node_;
};


void DeferredRegExpLiteral::Generate() {
  // Since the entry is undefined we call the runtime system to
  // compute the literal.
  // Literal array (0).
  __ push(literals_);
  // Literal index (1).
  __ Push(Smi::FromInt(node_->literal_index()));
  // RegExp pattern (2).
  __ Push(node_->pattern());
  // RegExp flags (3).
  __ Push(node_->flags());
  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
  if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
}


void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
  Comment cmnt(masm_, "[ RegExp Literal");

  // Retrieve the literals array and check the allocated entry.  Begin
  // with a writable copy of the function of this activation in a
  // register.
  frame_->PushFunction();
  Result literals = frame_->Pop();
  literals.ToRegister();
  frame_->Spill(literals.reg());

  // Load the literals array of the function.
  __ movq(literals.reg(),
          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));

  // Load the literal at the ast saved index.
  Result boilerplate = allocator_->Allocate();
  ASSERT(boilerplate.is_valid());
  int literal_offset =
      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
  __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));

  // Check whether we need to materialize the RegExp object.  If so,
  // jump to the deferred code passing the literals array.
  DeferredRegExpLiteral* deferred =
      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
  __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
  deferred->Branch(equal);
  deferred->BindExit();
  literals.Unuse();

  // Push the boilerplate object.
  frame_->Push(&boilerplate);
}


void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
  Comment cmnt(masm_, "[ ObjectLiteral");
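  // JavaScript example (hypothetical input):
  //   '{ x: 1, "y": f(), get z() { return 0; } }'
  // The constant property 'x' is set by the cloned boilerplate below;
  // the computed property and the getter are stored by the code
  // generated for each property.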

  // Load a writable copy of the function of this activation in a
  // register.
  frame_->PushFunction();
  Result literals = frame_->Pop();
  literals.ToRegister();
  frame_->Spill(literals.reg());

  // Load the literals array of the function.
  __ movq(literals.reg(),
          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
  // Literal array.
  frame_->Push(&literals);
  // Literal index.
  frame_->Push(Smi::FromInt(node->literal_index()));
  // Constant properties.
  frame_->Push(node->constant_properties());
  Result clone;
  if (node->depth() > 1) {
    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
  } else {
    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
  }
  frame_->Push(&clone);

  for (int i = 0; i < node->properties()->length(); i++) {
    ObjectLiteral::Property* property = node->properties()->at(i);
    switch (property->kind()) {
      case ObjectLiteral::Property::CONSTANT:
        break;
      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
        // else fall through.
      case ObjectLiteral::Property::COMPUTED: {
        Handle<Object> key(property->key()->handle());
        if (key->IsSymbol()) {
          // Duplicate the object as the IC receiver.
          frame_->Dup();
          Load(property->value());
          frame_->Push(key);
          Result ignored = frame_->CallStoreIC();
          // Drop the duplicated receiver and ignore the result.
          frame_->Drop();
          break;
        }
        // Fall through
      }
      case ObjectLiteral::Property::PROTOTYPE: {
        // Duplicate the object as an argument to the runtime call.
        frame_->Dup();
        Load(property->key());
        Load(property->value());
        Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
        // Ignore the result.
        break;
      }
      case ObjectLiteral::Property::SETTER: {
        // Duplicate the object as an argument to the runtime call.
        frame_->Dup();
        Load(property->key());
        frame_->Push(Smi::FromInt(1));
        Load(property->value());
        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
        // Ignore the result.
        break;
      }
      case ObjectLiteral::Property::GETTER: {
        // Duplicate the object as an argument to the runtime call.
        frame_->Dup();
        Load(property->key());
        frame_->Push(Smi::FromInt(0));
        Load(property->value());
        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
        // Ignore the result.
        break;
      }
      default: UNREACHABLE();
    }
  }
}


void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
  Comment cmnt(masm_, "[ ArrayLiteral");
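  // JavaScript example (hypothetical input): in '[1, 2, f(), 4]' only
  // the element 'f()' must be set by generated code below; the literal
  // elements are already present in the boilerplate.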

  // Load a writable copy of the function of this activation in a
  // register.
  frame_->PushFunction();
  Result literals = frame_->Pop();
  literals.ToRegister();
  frame_->Spill(literals.reg());

  // Load the literals array of the function.
  __ movq(literals.reg(),
          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
  // Literal array.
  frame_->Push(&literals);
  // Literal index.
  frame_->Push(Smi::FromInt(node->literal_index()));
  // Constant elements.
  frame_->Push(node->constant_elements());
  Result clone;
  if (node->depth() > 1) {
    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
  } else {
    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
  }
  frame_->Push(&clone);

  // Generate code to set the elements in the array that are not
  // literals.
  for (int i = 0; i < node->values()->length(); i++) {
    Expression* value = node->values()->at(i);

    // If value is a literal the property value is already set in the
    // boilerplate object.
    if (value->AsLiteral() != NULL) continue;
    // If value is a materialized literal the property value is already set
    // in the boilerplate object if it is simple.
    if (CompileTimeValue::IsCompileTimeValue(value)) continue;

    // The property must be set by generated code.
    Load(value);

    // Get the property value off the stack.
    Result prop_value = frame_->Pop();
    prop_value.ToRegister();

    // Fetch the array literal while leaving a copy on the stack and
    // use it to get the elements array.
    frame_->Dup();
    Result elements = frame_->Pop();
    elements.ToRegister();
    frame_->Spill(elements.reg());
    // Get the elements FixedArray.
    __ movq(elements.reg(),
            FieldOperand(elements.reg(), JSObject::kElementsOffset));

    // Write to the indexed properties array.
    int offset = i * kPointerSize + FixedArray::kHeaderSize;
    __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());

    // Update the write barrier for the array address.
    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
    Result scratch = allocator_->Allocate();
    ASSERT(scratch.is_valid());
    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
  }
}


void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
  ASSERT(!in_spilled_code());
  // Call runtime routine to allocate the catch extension object and
  // assign the exception value to the catch variable.
  Comment cmnt(masm_, "[ CatchExtensionObject");
  Load(node->key());
  Load(node->value());
  Result result =
      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
  frame_->Push(&result);
}


void CodeGenerator::VisitAssignment(Assignment* node) {
  Comment cmnt(masm_, "[ Assignment");

  { Reference target(this, node->target(), node->is_compound());
    if (target.is_illegal()) {
      // Fool the virtual frame into thinking that we left the assignment's
      // value on the frame.
      frame_->Push(Smi::FromInt(0));
      return;
    }
    Variable* var = node->target()->AsVariableProxy()->AsVariable();

    if (node->starts_initialization_block()) {
      ASSERT(target.type() == Reference::NAMED ||
             target.type() == Reference::KEYED);
      // Change to slow case in the beginning of an initialization
      // block to avoid the quadratic behavior of repeatedly adding
      // fast properties.
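      // JavaScript example (hypothetical input): a constructor body
      // such as 'this.a = 1; this.b = 2; this.c = 3;' forms one
      // initialization block, so the receiver is converted to slow mode
      // once up front instead of repeatedly growing its fast properties.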

      // The receiver is the argument to the runtime call.  It is the
      // first value pushed when the reference was loaded to the
      // frame.
      frame_->PushElementAt(target.size() - 1);
      Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
    }
    if (node->ends_initialization_block()) {
      // Add an extra copy of the receiver to the frame, so that it can be
      // converted back to fast case after the assignment.
      ASSERT(target.type() == Reference::NAMED ||
             target.type() == Reference::KEYED);
      if (target.type() == Reference::NAMED) {
        frame_->Dup();
        // Dup target receiver on stack.
      } else {
        ASSERT(target.type() == Reference::KEYED);
        Result temp = frame_->Pop();
        frame_->Dup();
        frame_->Push(&temp);
      }
    }
    if (node->op() == Token::ASSIGN ||
        node->op() == Token::INIT_VAR ||
        node->op() == Token::INIT_CONST) {
      Load(node->value());

    } else {  // Assignment is a compound assignment.
      Literal* literal = node->value()->AsLiteral();
      bool overwrite_value =
          (node->value()->AsBinaryOperation() != NULL &&
           node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
      Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
      // There are two easy-to-test cases where the target is not read in
      // the right-hand side: the right-hand side is a literal, or the
      // right-hand side is a different variable.  TakeValue invalidates
      // the target, with an implicit promise that it will be written to
      // again before it is read.
      if (literal != NULL || (right_var != NULL && right_var != var)) {
        target.TakeValue();
      } else {
        target.GetValue();
      }
      Load(node->value());
      GenericBinaryOperation(node->binary_op(),
                             node->type(),
                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
    }

    if (var != NULL &&
        var->mode() == Variable::CONST &&
        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
      // Assignment ignored - leave the value on the stack.
      UnloadReference(&target);
    } else {
      CodeForSourcePosition(node->position());
      if (node->op() == Token::INIT_CONST) {
        // Dynamic constant initializations must use the function context
        // and initialize the actual constant declared. Dynamic variable
        // initializations are simply assignments and use SetValue.
        target.SetValue(CONST_INIT);
      } else {
        target.SetValue(NOT_CONST_INIT);
      }
      if (node->ends_initialization_block()) {
        ASSERT(target.type() == Reference::UNLOADED);
        // End of initialization block. Revert to fast case.  The
        // argument to the runtime call is the extra copy of the receiver,
        // which is below the value of the assignment.
        // Swap the receiver and the value of the assignment expression.
        Result lhs = frame_->Pop();
        Result receiver = frame_->Pop();
        frame_->Push(&lhs);
        frame_->Push(&receiver);
        Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
      }
    }
  }
}


void CodeGenerator::VisitThrow(Throw* node) {
  Comment cmnt(masm_, "[ Throw");
  Load(node->exception());
  Result result = frame_->CallRuntime(Runtime::kThrow, 1);
  frame_->Push(&result);
}


void CodeGenerator::VisitProperty(Property* node) {
  Comment cmnt(masm_, "[ Property");
  Reference property(this, node);
  property.GetValue();
}


void CodeGenerator::VisitCall(Call* node) {
  Comment cmnt(masm_, "[ Call");

  ZoneList<Expression*>* args = node->arguments();

  // Check if the function is a variable or a property.
  Expression* function = node->expression();
  Variable* var = function->AsVariableProxy()->AsVariable();
  Property* property = function->AsProperty();

  // ------------------------------------------------------------------------
  // Fast-case: Use inline caching.
  // ---
  // According to ECMA-262, section 11.2.3, page 44, the function to call
  // must be resolved after the arguments have been evaluated. The IC code
  // automatically handles this by loading the arguments before the function
  // is resolved in cache misses (this also holds for megamorphic calls).
  // ------------------------------------------------------------------------

  if (var != NULL && var->is_possibly_eval()) {
    // ----------------------------------
    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
    // ----------------------------------

    // In a call to eval, we first call %ResolvePossiblyDirectEval to
    // resolve the function we need to call and the receiver of the
    // call.  Then we call the resolved function using the given
    // arguments.

    // Prepare the stack for the call to the resolved function.
    Load(function);

    // Allocate a frame slot for the receiver.
    frame_->Push(Factory::undefined_value());
    int arg_count = args->length();
    for (int i = 0; i < arg_count; i++) {
      Load(args->at(i));
    }

    // Prepare the stack for the call to ResolvePossiblyDirectEval.
    frame_->PushElementAt(arg_count + 1);
    if (arg_count > 0) {
      frame_->PushElementAt(arg_count);
    } else {
      frame_->Push(Factory::undefined_value());
    }

    // Push the receiver.
    frame_->PushParameterAt(-1);

    // Resolve the call.
    Result result =
        frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);

    // The runtime call returns a pair of values in rax (function) and
    // rdx (receiver). Touch up the stack with the right values.
    Result receiver = allocator_->Allocate(rdx);
    frame_->SetElementAt(arg_count + 1, &result);
    frame_->SetElementAt(arg_count, &receiver);
    receiver.Unuse();

    // Call the function.
    CodeForSourcePosition(node->position());
    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
    result = frame_->CallStub(&call_function, arg_count + 1);

    // Restore the context and overwrite the function on the stack with
    // the result.
    frame_->RestoreContextRegister();
    frame_->SetElementAt(0, &result);

  } else if (var != NULL && !var->is_this() && var->is_global()) {
    // ----------------------------------
    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
    // ----------------------------------

    // Push the name of the function and the receiver onto the stack.
    frame_->Push(var->name());

    // Pass the global object as the receiver and let the IC stub
    // patch the stack to use the global proxy as 'this' in the
    // invoked function.
    LoadGlobal();

    // Load the arguments.
    int arg_count = args->length();
    for (int i = 0; i < arg_count; i++) {
      Load(args->at(i));
    }

    // Call the IC initialization code.
    CodeForSourcePosition(node->position());
    Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
                                       arg_count,
                                       loop_nesting());
    frame_->RestoreContextRegister();
    // Replace the function on the stack with the result.
    frame_->SetElementAt(0, &result);

  } else if (var != NULL && var->slot() != NULL &&
             var->slot()->type() == Slot::LOOKUP) {
    // ----------------------------------
    // JavaScript example: 'with (obj) foo(1, 2, 3)'  // foo is in obj
    // ----------------------------------

    // Load the function from the context.  Sync the frame so we can
    // push the arguments directly into place.
    frame_->SyncRange(0, frame_->element_count() - 1);
    frame_->EmitPush(rsi);
    frame_->EmitPush(var->name());
    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
    // The runtime call returns a pair of values in rax and rdx.  The
    // looked-up function is in rax and the receiver is in rdx.  These
    // register references are not ref counted here.  We spill them
    // eagerly since they are arguments to an inevitable call (and are
    // not sharable by the arguments).
    ASSERT(!allocator()->is_used(rax));
    frame_->EmitPush(rax);

    // Load the receiver.
    ASSERT(!allocator()->is_used(rdx));
    frame_->EmitPush(rdx);

    // Call the function.
    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());

  } else if (property != NULL) {
    // Check if the key is a literal string.
    Literal* literal = property->key()->AsLiteral();

    if (literal != NULL && literal->handle()->IsSymbol()) {
      // ------------------------------------------------------------------
      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
      // ------------------------------------------------------------------

      Handle<String> name = Handle<String>::cast(literal->handle());

      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
          name->IsEqualTo(CStrVector("apply")) &&
          args->length() == 2 &&
          args->at(1)->AsVariableProxy() != NULL &&
          args->at(1)->AsVariableProxy()->IsArguments()) {
        // Use the optimized Function.prototype.apply that avoids
        // allocating lazily allocated arguments objects.
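        // JavaScript example (hypothetical input): 'f.apply(obj, arguments)'
        // inside a function whose arguments object has not yet been
        // allocated.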
        CallApplyLazy(property->obj(),
                      args->at(0),
                      args->at(1)->AsVariableProxy(),
                      node->position());

      } else {
        // Push the name of the function and the receiver onto the stack.
        frame_->Push(name);
        Load(property->obj());

        // Load the arguments.
        int arg_count = args->length();
        for (int i = 0; i < arg_count; i++) {
          Load(args->at(i));
        }

        // Call the IC initialization code.
        CodeForSourcePosition(node->position());
        Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
                                           arg_count,
                                           loop_nesting());
        frame_->RestoreContextRegister();
        // Replace the function on the stack with the result.
        frame_->SetElementAt(0, &result);
      }

    } else {
      // -------------------------------------------
      // JavaScript example: 'array[index](1, 2, 3)'
      // -------------------------------------------

      // Load the function to call from the property through a reference.
      if (property->is_synthetic()) {
        Reference ref(this, property, false);
        ref.GetValue();
        // Use global object as receiver.
        LoadGlobalReceiver();
      } else {
        Reference ref(this, property, false);
        ASSERT(ref.size() == 2);
        Result key = frame_->Pop();
        frame_->Dup();  // Duplicate the receiver.
        frame_->Push(&key);
        ref.GetValue();
        // Top of frame contains function to call, with duplicate copy of
        // receiver below it.  Swap them.
        Result function = frame_->Pop();
        Result receiver = frame_->Pop();
        frame_->Push(&function);
        frame_->Push(&receiver);
      }

      // Call the function.
      CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
    }

  } else {
    // ----------------------------------
    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
    // ----------------------------------

    // Load the function.
    Load(function);

    // Pass the global proxy as the receiver.
    LoadGlobalReceiver();

    // Call the function.
    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
  }
}


void CodeGenerator::VisitCallNew(CallNew* node) {
  Comment cmnt(masm_, "[ CallNew");

  // According to ECMA-262, section 11.2.2, page 44, the function
  // expression in new calls must be evaluated before the
  // arguments. This is different from ordinary calls, where the
  // actual function to call is resolved after the arguments have been
  // evaluated.
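  // JavaScript example (hypothetical input): in 'new Foo(a, b)' the
  // expression 'Foo' is evaluated before 'a' and 'b'.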

  // Compute function to call and use the global object as the
  // receiver. There is no need to use the global proxy here because
  // it will always be replaced with a newly allocated object.
  Load(node->expression());
  LoadGlobal();

  // Push the arguments ("left-to-right") on the stack.
  ZoneList<Expression*>* args = node->arguments();
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
  }

  // Call the construct call builtin that handles allocation and
  // constructor invocation.
  CodeForSourcePosition(node->position());
  Result result = frame_->CallConstructor(arg_count);
  // Replace the function on the stack with the result.
  frame_->SetElementAt(0, &result);
}


void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
  if (CheckForInlineRuntimeCall(node)) {
    return;
  }

  ZoneList<Expression*>* args = node->arguments();
  Comment cmnt(masm_, "[ CallRuntime");
  Runtime::Function* function = node->function();

  if (function == NULL) {
    // Prepare stack for calling JS runtime function.
    frame_->Push(node->name());
    // Push the builtins object found in the current global object.
    Result temp = allocator()->Allocate();
    ASSERT(temp.is_valid());
    __ movq(temp.reg(), GlobalObject());
    __ movq(temp.reg(),
            FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
    frame_->Push(&temp);
  }

  // Push the arguments ("left-to-right").
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
    Load(args->at(i));
  }

  if (function == NULL) {
    // Call the JS runtime function.
    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
                                       arg_count,
                                       loop_nesting_);
    frame_->RestoreContextRegister();
    frame_->SetElementAt(0, &answer);
  } else {
    // Call the C runtime function.
    Result answer = frame_->CallRuntime(function, arg_count);
    frame_->Push(&answer);
  }
}


void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
  Comment cmnt(masm_, "[ UnaryOperation");

  Token::Value op = node->op();

  if (op == Token::NOT) {
    // Swap the true and false targets but keep the same actual label
    // as the fall through.
    destination()->Invert();
    LoadCondition(node->expression(), destination(), true);
    // Swap the labels back.
    destination()->Invert();

  } else if (op == Token::DELETE) {
    Property* property = node->expression()->AsProperty();
    if (property != NULL) {
      Load(property->obj());
      Load(property->key());
      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
      frame_->Push(&answer);
      return;
    }

    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
    if (variable != NULL) {
      Slot* slot = variable->slot();
      if (variable->is_global()) {
        LoadGlobal();
        frame_->Push(variable->name());
        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
                                              CALL_FUNCTION, 2);
        frame_->Push(&answer);
        return;

      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
        // Call the runtime to look up the context holding the named
        // variable.  Sync the virtual frame eagerly so we can push the
        // arguments directly into place.
        frame_->SyncRange(0, frame_->element_count() - 1);
        frame_->EmitPush(rsi);
        frame_->EmitPush(variable->name());
        Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
        ASSERT(context.is_register());
        frame_->EmitPush(context.reg());
        context.Unuse();
        frame_->EmitPush(variable->name());
        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
                                              CALL_FUNCTION, 2);
        frame_->Push(&answer);
        return;
      }

3015      // Default: The result of deleting a non-global variable that was
3016      // not dynamically introduced is false.
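          // JavaScript example: 'function f(x) { return delete x; }'
          // yields false.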
3017      frame_->Push(Factory::false_value());
3018
3019    } else {
3020      // Default: Result of deleting expressions is true.
3021      Load(node->expression());  // may have side-effects
3022      frame_->SetElementAt(0, Factory::true_value());
3023    }
3024
3025  } else if (op == Token::TYPEOF) {
3026    // Special case for loading the typeof expression; see comment on
3027    // LoadTypeofExpression().
3028    LoadTypeofExpression(node->expression());
3029    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
3030    frame_->Push(&answer);
3031
3032  } else if (op == Token::VOID) {
3033    Expression* expression = node->expression();
3034    if (expression && expression->AsLiteral() && (
3035        expression->AsLiteral()->IsTrue() ||
3036        expression->AsLiteral()->IsFalse() ||
3037        expression->AsLiteral()->handle()->IsNumber() ||
3038        expression->AsLiteral()->handle()->IsString() ||
3039        expression->AsLiteral()->handle()->IsJSRegExp() ||
3040        expression->AsLiteral()->IsNull())) {
3041      // Omit evaluating the value of the primitive literal.
3042      // It will be discarded anyway, and can have no side effect.
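          // JavaScript example: 'void 0' compiles to a single push of
          // the undefined value.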
3043      frame_->Push(Factory::undefined_value());
3044    } else {
3045      Load(node->expression());
3046      frame_->SetElementAt(0, Factory::undefined_value());
3047    }
3048
3049  } else {
3050    bool overwrite =
3051      (node->expression()->AsBinaryOperation() != NULL &&
3052       node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
3053    Load(node->expression());
3054    switch (op) {
3055      case Token::NOT:
3056      case Token::DELETE:
3057      case Token::TYPEOF:
3058        UNREACHABLE();  // handled above
3059        break;
3060
3061      case Token::SUB: {
3062        GenericUnaryOpStub stub(Token::SUB, overwrite);
3063        // TODO(1222589): remove dependency on TOS being cached inside stub
3064        Result operand = frame_->Pop();
3065        Result answer = frame_->CallStub(&stub, &operand);
3066        frame_->Push(&answer);
3067        break;
3068      }
3069
3070      case Token::BIT_NOT: {
3071        // Smi check.
3072        JumpTarget smi_label;
3073        JumpTarget continue_label;
3074        Result operand = frame_->Pop();
3075        operand.ToRegister();
3076
3077        Condition is_smi = masm_->CheckSmi(operand.reg());
3078        smi_label.Branch(is_smi, &operand);
3079
3080        GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
3081        Result answer = frame_->CallStub(&stub, &operand);
3082        continue_label.Jump(&answer);
3083
3084        smi_label.Bind(&answer);
3085        answer.ToRegister();
3086        frame_->Spill(answer.reg());
3087        __ SmiNot(answer.reg(), answer.reg());
3088        continue_label.Bind(&answer);
3089        frame_->Push(&answer);
3090        break;
3091      }
3092
3093      case Token::ADD: {
3094        // Smi check.
3095        JumpTarget continue_label;
3096        Result operand = frame_->Pop();
3097        operand.ToRegister();
3098        Condition is_smi = masm_->CheckSmi(operand.reg());
3099        continue_label.Branch(is_smi, &operand);
3100        frame_->Push(&operand);
3101        Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
3102                                              CALL_FUNCTION, 1);
3103
3104        continue_label.Bind(&answer);
3105        frame_->Push(&answer);
3106        break;
3107      }
3108
3109      default:
3110        UNREACHABLE();
3111    }
3112  }
3113}
3114
3115
3116// The value in dst was optimistically incremented or decremented, but the
3117// result overflowed or the value was not smi tagged.  The optimistic count
3118// wrote only to a scratch register, so dst is unchanged.  Call the runtime
3119// to convert dst to a number and redo the count; the result is left in dst.
3120class DeferredPrefixCountOperation: public DeferredCode {
3121 public:
3122  DeferredPrefixCountOperation(Register dst, bool is_increment)
3123      : dst_(dst), is_increment_(is_increment) {
3124    set_comment("[ DeferredCountOperation");
3125  }
3126
3127  virtual void Generate();
3128
3129 private:
3130  Register dst_;
3131  bool is_increment_;
3132};
3133
3134
3135void DeferredPrefixCountOperation::Generate() {
3136  __ push(dst_);
3137  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3138  __ push(rax);
3139  __ Push(Smi::FromInt(1));
3140  if (is_increment_) {
3141    __ CallRuntime(Runtime::kNumberAdd, 2);
3142  } else {
3143    __ CallRuntime(Runtime::kNumberSub, 2);
3144  }
3145  if (!dst_.is(rax)) __ movq(dst_, rax);
3146}
3147
3148
3149// The value in dst was optimistically incremented or decremented, but the
3150// result overflowed or the value was not smi tagged.  The optimistic count
3151// wrote only to a scratch register, so dst is unchanged.  Call the runtime
3152// to convert dst to a number and redo the count; the converted original
3153// value is saved in old and the result is left in dst.
3154class DeferredPostfixCountOperation: public DeferredCode {
3155 public:
3156  DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
3157      : dst_(dst), old_(old), is_increment_(is_increment) {
3158    set_comment("[ DeferredCountOperation");
3159  }
3160
3161  virtual void Generate();
3162
3163 private:
3164  Register dst_;
3165  Register old_;
3166  bool is_increment_;
3167};
3168
3169
3170void DeferredPostfixCountOperation::Generate() {
3171  __ push(dst_);
3172  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
3173
3174  // Save the result of ToNumber to use as the old value.
3175  __ push(rax);
3176
3177  // Call the runtime for the addition or subtraction.
3178  __ push(rax);
3179  __ Push(Smi::FromInt(1));
3180  if (is_increment_) {
3181    __ CallRuntime(Runtime::kNumberAdd, 2);
3182  } else {
3183    __ CallRuntime(Runtime::kNumberSub, 2);
3184  }
3185  if (!dst_.is(rax)) __ movq(dst_, rax);
3186  __ pop(old_);
3187}
3188
3189
3190void CodeGenerator::VisitCountOperation(CountOperation* node) {
3191  Comment cmnt(masm_, "[ CountOperation");
3192
3193  bool is_postfix = node->is_postfix();
3194  bool is_increment = node->op() == Token::INC;
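      // JavaScript examples: '++i' (prefix, is_postfix is false) and
      // 'i++' (postfix, is_postfix is true).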
3195
3196  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
3197  bool is_const = (var != NULL && var->mode() == Variable::CONST);
3198
3199  // Postfix operations need a stack slot under the reference to hold
3200  // the old value while the new value is being stored.  This is so that
3201  // in the case that storing the new value requires a call, the old
3202  // value will be in the frame to be spilled.
3203  if (is_postfix) frame_->Push(Smi::FromInt(0));
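      // Once the reference has been loaded, the frame looks like
      // ... [old-value slot] [reference elements]; the call to
      // SetElementAt(target.size(), ...) below fills the slot as soon
      // as the old value is known.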
3204
3205  // A constant reference is not saved to, so the reference is not a
3206  // compound assignment reference.
3207  { Reference target(this, node->expression(), !is_const);
3208    if (target.is_illegal()) {
3209      // Spoof the virtual frame to have the expected height (one higher
3210      // than on entry).
3211      if (!is_postfix) frame_->Push(Smi::FromInt(0));
3212      return;
3213    }
3214    target.TakeValue();
3215
3216    Result new_value = frame_->Pop();
3217    new_value.ToRegister();
3218
3219    Result old_value;  // Only allocated in the postfix case.
3220    if (is_postfix) {
3221      // Allocate a temporary to preserve the old value.
3222      old_value = allocator_->Allocate();
3223      ASSERT(old_value.is_valid());
3224      __ movq(old_value.reg(), new_value.reg());
3225    }
3226    // Ensure the new value is writable.
3227    frame_->Spill(new_value.reg());
3228
3229    DeferredCode* deferred = NULL;
3230    if (is_postfix) {
3231      deferred = new DeferredPostfixCountOperation(new_value.reg(),
3232                                                   old_value.reg(),
3233                                                   is_increment);
3234    } else {
3235      deferred = new DeferredPrefixCountOperation(new_value.reg(),
3236                                                  is_increment);
3237    }
3238
3239    __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
3240    if (is_increment) {
3241      __ SmiAddConstant(kScratchRegister,
3242                        new_value.reg(),
3243                        Smi::FromInt(1),
3244                        deferred->entry_label());
3245    } else {
3246      __ SmiSubConstant(kScratchRegister,
3247                        new_value.reg(),
3248                        Smi::FromInt(1),
3249                        deferred->entry_label());
3250    }
3251    __ movq(new_value.reg(), kScratchRegister);
3252    deferred->BindExit();
3253
3254    // Postfix: store the old value in the allocated slot under the
3255    // reference.
3256    if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
3257
3258    frame_->Push(&new_value);
3259    // Non-constant: update the reference.
3260    if (!is_const) target.SetValue(NOT_CONST_INIT);
3261  }
3262
3263  // Postfix: drop the new value and use the old.
3264  if (is_postfix) frame_->Drop();
3265}
3266
3267
3268void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
3269  // TODO(X64): This code was copied verbatim from codegen-ia32.
3270  //     Either find a reason to change it or move it to a shared location.
3271
3272  Comment cmnt(masm_, "[ BinaryOperation");
3273  Token::Value op = node->op();
3274
3275  // According to ECMA-262 section 11.11, page 58, the binary logical
3276  // operators must yield the result of one of the two expressions
3277  // before any ToBoolean() conversions. This means that the value
3278  // produced by a && or || operator is not necessarily a boolean.
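      // JavaScript example: '0 && f()' yields 0 and 'null || "x"'
      // yields "x"; neither result is a boolean.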
3279
3280  // NOTE: If the left hand side produces a materialized value (not
3281  // control flow), we force the right hand side to do the same. This
3282  // is necessary because we assume that if we get control flow on the
3283  // last path out of an expression we got it on all paths.
3284  if (op == Token::AND) {
3285    JumpTarget is_true;
3286    ControlDestination dest(&is_true, destination()->false_target(), true);
3287    LoadCondition(node->left(), &dest, false);
3288
3289    if (dest.false_was_fall_through()) {
3290      // The current false target was used as the fall-through.  If
3291      // there are no dangling jumps to is_true then the left
3292      // subexpression was unconditionally false.  Otherwise we have
3293      // paths where we do have to evaluate the right subexpression.
3294      if (is_true.is_linked()) {
3295        // We need to compile the right subexpression.  If the jump to
3296        // the current false target was a forward jump then we have a
3297        // valid frame, we have just bound the false target, and we
3298        // have to jump around the code for the right subexpression.
3299        if (has_valid_frame()) {
3300          destination()->false_target()->Unuse();
3301          destination()->false_target()->Jump();
3302        }
3303        is_true.Bind();
3304        // The left subexpression compiled to control flow, so the
3305        // right one is free to do so as well.
3306        LoadCondition(node->right(), destination(), false);
3307      } else {
3308        // We have actually just jumped to or bound the current false
3309        // target but the current control destination is not marked as
3310        // used.
3311        destination()->Use(false);
3312      }
3313
3314    } else if (dest.is_used()) {
3315      // The left subexpression compiled to control flow (and is_true
3316      // was just bound), so the right is free to do so as well.
3317      LoadCondition(node->right(), destination(), false);
3318
3319    } else {
3320      // We have a materialized value on the frame, so we exit with
3321      // one on all paths.  There are possibly also jumps to is_true
3322      // from nested subexpressions.
3323      JumpTarget pop_and_continue;
3324      JumpTarget exit;
3325
3326      // Avoid popping the result if it converts to 'false' using the
3327      // standard ToBoolean() conversion as described in ECMA-262,
3328      // section 9.2, page 30.
3329      //
3330      // Duplicate the TOS value. The duplicate will be popped by
3331      // ToBoolean.
3332      frame_->Dup();
3333      ControlDestination dest(&pop_and_continue, &exit, true);
3334      ToBoolean(&dest);
3335
3336      // Pop the result of evaluating the first part.
3337      frame_->Drop();
3338
3339      // Compile right side expression.
3340      is_true.Bind();
3341      Load(node->right());
3342
3343      // Exit (always with a materialized value).
3344      exit.Bind();
3345    }
3346
3347  } else if (op == Token::OR) {
3348    JumpTarget is_false;
3349    ControlDestination dest(destination()->true_target(), &is_false, false);
3350    LoadCondition(node->left(), &dest, false);
3351
3352    if (dest.true_was_fall_through()) {
3353      // The current true target was used as the fall-through.  If
3354      // there are no dangling jumps to is_false then the left
3355      // subexpression was unconditionally true.  Otherwise we have
3356      // paths where we do have to evaluate the right subexpression.
3357      if (is_false.is_linked()) {
3358        // We need to compile the right subexpression.  If the jump to
3359        // the current true target was a forward jump then we have a
3360        // valid frame, we have just bound the true target, and we
3361        // have to jump around the code for the right subexpression.
3362        if (has_valid_frame()) {
3363          destination()->true_target()->Unuse();
3364          destination()->true_target()->Jump();
3365        }
3366        is_false.Bind();
3367        // The left subexpression compiled to control flow, so the
3368        // right one is free to do so as well.
3369        LoadCondition(node->right(), destination(), false);
3370      } else {
3371        // We have just jumped to or bound the current true target but
3372        // the current control destination is not marked as used.
3373        destination()->Use(true);
3374      }
3375
3376    } else if (dest.is_used()) {
3377      // The left subexpression compiled to control flow (and is_false
3378      // was just bound), so the right is free to do so as well.
3379      LoadCondition(node->right(), destination(), false);
3380
3381    } else {
3382      // We have a materialized value on the frame, so we exit with
3383      // one on all paths.  There are possibly also jumps to is_false
3384      // from nested subexpressions.
3385      JumpTarget pop_and_continue;
3386      JumpTarget exit;
3387
3388      // Avoid popping the result if it converts to 'true' using the
3389      // standard ToBoolean() conversion as described in ECMA-262,
3390      // section 9.2, page 30.
3391      //
3392      // Duplicate the TOS value. The duplicate will be popped by
3393      // ToBoolean.
3394      frame_->Dup();
3395      ControlDestination dest(&exit, &pop_and_continue, false);
3396      ToBoolean(&dest);
3397
3398      // Pop the result of evaluating the first part.
3399      frame_->Drop();
3400
3401      // Compile right side expression.
3402      is_false.Bind();
3403      Load(node->right());
3404
3405      // Exit (always with a materialized value).
3406      exit.Bind();
3407    }
3408
3409  } else {
3410    // NOTE: The code below assumes that the slow cases (calls to runtime)
3411    // never return a constant/immutable object.
3412    OverwriteMode overwrite_mode = NO_OVERWRITE;
3413    if (node->left()->AsBinaryOperation() != NULL &&
3414        node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3415      overwrite_mode = OVERWRITE_LEFT;
3416    } else if (node->right()->AsBinaryOperation() != NULL &&
3417               node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
3418      overwrite_mode = OVERWRITE_RIGHT;
3419    }
3420
3421    Load(node->left());
3422    Load(node->right());
3423    GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
3424  }
3425}
3426
3427
3429void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
3430  Comment cmnt(masm_, "[ CompareOperation");
3431
3432  // Get the expressions from the node.
3433  Expression* left = node->left();
3434  Expression* right = node->right();
3435  Token::Value op = node->op();
3436  // To make typeof testing for natives implemented in JavaScript really
3437  // efficient, we generate special code for expressions of the form:
3438  // 'typeof <expression> == <string>'.
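      // JavaScript example: 'typeof x == "number"' compiles directly to
      // the smi and heap-number map checks below, without materializing
      // the string that typeof would produce.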
3439  UnaryOperation* operation = left->AsUnaryOperation();
3440  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
3441      (operation != NULL && operation->op() == Token::TYPEOF) &&
3442      (right->AsLiteral() != NULL &&
3443       right->AsLiteral()->handle()->IsString())) {
3444    Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
3445
3446    // Load the operand and move it to a register.
3447    LoadTypeofExpression(operation->expression());
3448    Result answer = frame_->Pop();
3449    answer.ToRegister();
3450
3451    if (check->Equals(Heap::number_symbol())) {
3452      Condition is_smi = masm_->CheckSmi(answer.reg());
3453      destination()->true_target()->Branch(is_smi);
3454      frame_->Spill(answer.reg());
3455      __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
3456      __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
3457      answer.Unuse();
3458      destination()->Split(equal);
3459
3460    } else if (check->Equals(Heap::string_symbol())) {
3461      Condition is_smi = masm_->CheckSmi(answer.reg());
3462      destination()->false_target()->Branch(is_smi);
3463
3464      // It can be an undetectable string object.
3465      __ movq(kScratchRegister,
3466              FieldOperand(answer.reg(), HeapObject::kMapOffset));
3467      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3468               Immediate(1 << Map::kIsUndetectable));
3469      destination()->false_target()->Branch(not_zero);
3470      __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
3471      answer.Unuse();
3472      destination()->Split(below);  // Unsigned byte comparison needed.
3473
3474    } else if (check->Equals(Heap::boolean_symbol())) {
3475      __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
3476      destination()->true_target()->Branch(equal);
3477      __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
3478      answer.Unuse();
3479      destination()->Split(equal);
3480
3481    } else if (check->Equals(Heap::undefined_symbol())) {
3482      __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
3483      destination()->true_target()->Branch(equal);
3484
3485      Condition is_smi = masm_->CheckSmi(answer.reg());
3486      destination()->false_target()->Branch(is_smi);
3487
3488      // It can be an undetectable object.
3489      __ movq(kScratchRegister,
3490              FieldOperand(answer.reg(), HeapObject::kMapOffset));
3491      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3492               Immediate(1 << Map::kIsUndetectable));
3493      answer.Unuse();
3494      destination()->Split(not_zero);
3495
3496    } else if (check->Equals(Heap::function_symbol())) {
3497      Condition is_smi = masm_->CheckSmi(answer.reg());
3498      destination()->false_target()->Branch(is_smi);
3499      frame_->Spill(answer.reg());
3500      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
3501      destination()->true_target()->Branch(equal);
3502      // Regular expressions are callable so typeof == 'function'.
3503      __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
3504      answer.Unuse();
3505      destination()->Split(equal);
3506
3507    } else if (check->Equals(Heap::object_symbol())) {
3508      Condition is_smi = masm_->CheckSmi(answer.reg());
3509      destination()->false_target()->Branch(is_smi);
3510      __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
3511      destination()->true_target()->Branch(equal);
3512
3513      // Regular expressions are typeof == 'function', not 'object'.
3514      __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
3515      destination()->false_target()->Branch(equal);
3516
3517      // It can be an undetectable object.
3518      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3519               Immediate(1 << Map::kIsUndetectable));
3520      destination()->false_target()->Branch(not_zero);
3521      __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
3522      destination()->false_target()->Branch(below);
3523      __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
3524      answer.Unuse();
3525      destination()->Split(below_equal);
3526    } else {
3527      // Uncommon case: typeof testing against a string literal that is
3528      // never returned from the typeof operator.
3529      answer.Unuse();
3530      destination()->Goto(false);
3531    }
3532    return;
3533  }
3534
3535  Condition cc = no_condition;
3536  bool strict = false;
3537  switch (op) {
3538    case Token::EQ_STRICT:
3539      strict = true;
3540      // Fall through
3541    case Token::EQ:
3542      cc = equal;
3543      break;
3544    case Token::LT:
3545      cc = less;
3546      break;
3547    case Token::GT:
3548      cc = greater;
3549      break;
3550    case Token::LTE:
3551      cc = less_equal;
3552      break;
3553    case Token::GTE:
3554      cc = greater_equal;
3555      break;
3556    case Token::IN: {
3557      Load(left);
3558      Load(right);
3559      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
3560      frame_->Push(&answer);  // push the result
3561      return;
3562    }
3563    case Token::INSTANCEOF: {
3564      Load(left);
3565      Load(right);
3566      InstanceofStub stub;
3567      Result answer = frame_->CallStub(&stub, 2);
3568      answer.ToRegister();
3569      __ testq(answer.reg(), answer.reg());
3570      answer.Unuse();
3571      destination()->Split(zero);
3572      return;
3573    }
3574    default:
3575      UNREACHABLE();
3576  }
3577  Load(left);
3578  Load(right);
3579  Comparison(cc, strict, destination());
3580}
3581
3582
3583void CodeGenerator::VisitThisFunction(ThisFunction* node) {
3584  frame_->PushFunction();
3585}
3586
3587
3588void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
3589  ASSERT(args->length() == 1);
3590
3591  // ArgumentsAccessStub expects the key in rdx and the formal
3592  // parameter count in rax.
3593  Load(args->at(0));
3594  Result key = frame_->Pop();
3595  // Explicitly create a constant result.
3596  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
3597  // Call the shared stub to get to arguments[key].
3598  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
3599  Result result = frame_->CallStub(&stub, &key, &count);
3600  frame_->Push(&result);
3601}
3602
3603
3604void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3605  ASSERT(args->length() == 1);
3606  Load(args->at(0));
3607  Result value = frame_->Pop();
3608  value.ToRegister();
3609  ASSERT(value.is_valid());
3610  Condition is_smi = masm_->CheckSmi(value.reg());
3611  destination()->false_target()->Branch(is_smi);
3612  // It is a heap object - get map.
3613  // Check if the object is a JS array or not.
3614  __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
3615  value.Unuse();
3616  destination()->Split(equal);
3617}
3618
3619
3620void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
3621  // This generates a fast version of:
3622  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
3623  ASSERT(args->length() == 1);
3624  Load(args->at(0));
3625  Result obj = frame_->Pop();
3626  obj.ToRegister();
3627  Condition is_smi = masm_->CheckSmi(obj.reg());
3628  destination()->false_target()->Branch(is_smi);
3629
3630  __ Move(kScratchRegister, Factory::null_value());
3631  __ cmpq(obj.reg(), kScratchRegister);
3632  destination()->true_target()->Branch(equal);
3633
3634  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
3635  // Undetectable objects behave like undefined when tested with typeof.
3636  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3637          Immediate(1 << Map::kIsUndetectable));
3638  destination()->false_target()->Branch(not_zero);
3639  __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
3640  destination()->false_target()->Branch(less);
3641  __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
3642  obj.Unuse();
3643  destination()->Split(less_equal);
3644}
3645
3646
3647void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
3648  // This generates a fast version of:
3649  // (%_ClassOf(arg) === 'Function')
3650  ASSERT(args->length() == 1);
3651  Load(args->at(0));
3652  Result obj = frame_->Pop();
3653  obj.ToRegister();
3654  Condition is_smi = masm_->CheckSmi(obj.reg());
3655  destination()->false_target()->Branch(is_smi);
3656  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
3657  obj.Unuse();
3658  destination()->Split(equal);
3659}
3660
3661
3662void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
3663  ASSERT(args->length() == 1);
3664  Load(args->at(0));
3665  Result obj = frame_->Pop();
3666  obj.ToRegister();
3667  Condition is_smi = masm_->CheckSmi(obj.reg());
3668  destination()->false_target()->Branch(is_smi);
3669  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
3670  __ movzxbl(kScratchRegister,
3671             FieldOperand(kScratchRegister, Map::kBitFieldOffset));
3672  __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
3673  obj.Unuse();
3674  destination()->Split(not_zero);
3675}
3676
3677
3678void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3679  ASSERT(args->length() == 0);
3680
3681  // Get the frame pointer for the calling frame.
3682  Result fp = allocator()->Allocate();
3683  __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3684
3685  // Skip the arguments adaptor frame if it exists.
3686  Label check_frame_marker;
3687  __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
3688                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
3689  __ j(not_equal, &check_frame_marker);
3690  __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
3691
3692  // Check the marker in the calling frame.
3693  __ bind(&check_frame_marker);
3694  __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
3695                Smi::FromInt(StackFrame::CONSTRUCT));
3696  fp.Unuse();
3697  destination()->Split(equal);
3698}
3699
3700
3701void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
3702  ASSERT(args->length() == 0);
3703  // ArgumentsAccessStub takes the parameter count as an input argument
3704  // in register rax.  Create a constant result for it.
3705  Result count(Handle<Smi>(Smi::FromInt(scope_->num_parameters())));
3706  // Call the shared stub to get to the arguments.length.
3707  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
3708  Result result = frame_->CallStub(&stub, &count);
3709  frame_->Push(&result);
3710}
3711
3712
3713void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
3714  Comment(masm_, "[ GenerateFastCharCodeAt");
3715  ASSERT(args->length() == 2);
3716
3717  Label slow_case;
3718  Label end;
3719  Label not_a_flat_string;
3720  Label try_again_with_new_string;
3721  Label ascii_string;
3722  Label got_char_code;
3723
3724  Load(args->at(0));
3725  Load(args->at(1));
3726  Result index = frame_->Pop();
3727  Result object = frame_->Pop();
3728
3729  // Get register rcx to use as a scratch register later.
3730  Result shift_amount;
3731  if (object.is_register() && object.reg().is(rcx)) {
3732    Result fresh = allocator_->Allocate();
3733    shift_amount = object;
3734    object = fresh;
3735    __ movq(object.reg(), rcx);
3736  }
3737  if (index.is_register() && index.reg().is(rcx)) {
3738    Result fresh = allocator_->Allocate();
3739    shift_amount = index;
3740    index = fresh;
3741    __ movq(index.reg(), rcx);
3742  }
3743  // There could be references to rcx in the frame. Allocating will
3744  // spill them; otherwise, spill explicitly.
3745  if (shift_amount.is_valid()) {
3746    frame_->Spill(rcx);
3747  } else {
3748    shift_amount = allocator()->Allocate(rcx);
3749  }
3750  ASSERT(shift_amount.is_register());
3751  ASSERT(shift_amount.reg().is(rcx));
3752  ASSERT(allocator_->count(rcx) == 1);
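      // At this point rcx belongs exclusively to this function; the
      // frame holds no other references to it.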
3753
3754  // We will mutate the index register and possibly the object register.
3755  // The case where they are somehow the same register is handled
3756  // because we only mutate them in the case where the receiver is a
3757  // heap object and the index is not.
3758  object.ToRegister();
3759  index.ToRegister();
3760  frame_->Spill(object.reg());
3761  frame_->Spill(index.reg());
3762
3763  // We need a single extra temporary register.
3764  Result temp = allocator()->Allocate();
3765  ASSERT(temp.is_valid());
3766
3767  // There is no virtual frame effect from here up to the final result
3768  // push.
3769
3770  // If the receiver is a smi trigger the slow case.
3771  __ JumpIfSmi(object.reg(), &slow_case);
3772
3773  // If the index is negative or non-smi trigger the slow case.
3774  __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
3775
3776  // Untag the index.
3777  __ SmiToInteger32(index.reg(), index.reg());
3778
3779  __ bind(&try_again_with_new_string);
3780  // Fetch the instance type of the receiver into rcx.
3781  __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
3782  __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
3783  // If the receiver is not a string trigger the slow case.
3784  __ testb(rcx, Immediate(kIsNotStringMask));
3785  __ j(not_zero, &slow_case);
3786
3787  // Check for index out of range.
3788  __ cmpl(index.reg(), FieldOperand(object.reg(), String::kLengthOffset));
3789  __ j(greater_equal, &slow_case);
3790  // Reload the instance type (into the temp register this time).
3791  __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
3792  __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
3793
3794  // We need special handling for non-flat strings.
3795  ASSERT_EQ(0, kSeqStringTag);
3796  __ testb(temp.reg(), Immediate(kStringRepresentationMask));
3797  __ j(not_zero, &not_a_flat_string);
3798  // Check for 1-byte or 2-byte string.
3799  ASSERT_EQ(0, kTwoByteStringTag);
3800  __ testb(temp.reg(), Immediate(kStringEncodingMask));
3801  __ j(not_zero, &ascii_string);
3802
3803  // 2-byte string.
3804  // Load the 2-byte character code into the temp register.
3805  __ movzxwl(temp.reg(), FieldOperand(object.reg(),
3806                                      index.reg(),
3807                                      times_2,
3808                                      SeqTwoByteString::kHeaderSize));
3809  __ jmp(&got_char_code);
3810
3811  // ASCII string.
3812  __ bind(&ascii_string);
3813  // Load the byte into the temp register.
3814  __ movzxbl(temp.reg(), FieldOperand(object.reg(),
3815                                      index.reg(),
3816                                      times_1,
3817                                      SeqAsciiString::kHeaderSize));
3818  __ bind(&got_char_code);
3819  __ Integer32ToSmi(temp.reg(), temp.reg());
3820  __ jmp(&end);
3821
3822  // Handle non-flat strings.
3823  __ bind(&not_a_flat_string);
3824  __ and_(temp.reg(), Immediate(kStringRepresentationMask));
3825  __ cmpb(temp.reg(), Immediate(kConsStringTag));
3826  __ j(not_equal, &slow_case);
3827
3828  // ConsString.
3829  // Check that the right hand side is the empty string (i.e., that this is
3830  // really a flat string in a cons string).  If that is not the case we would
3831  // rather go to the runtime system now, to flatten the string.
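      // (Flattening a cons string typically leaves the flat result in
      // 'first' and the empty string in 'second', which is the case
      // handled here.)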
3832  __ movq(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
3833  __ CompareRoot(temp.reg(), Heap::kEmptyStringRootIndex);
3834  __ j(not_equal, &slow_case);
3835  // Get the first of the two strings.
3836  __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
3837  __ jmp(&try_again_with_new_string);
3838
3839  __ bind(&slow_case);
3840  // Move the undefined value into the result register, which will
3841  // trigger the slow case.
3842  __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
3843
3844  __ bind(&end);
3845  frame_->Push(&temp);
3846}
3847
3848
3849void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3850  ASSERT(args->length() == 1);
3851  Load(args->at(0));
3852  Result value = frame_->Pop();
3853  value.ToRegister();
3854  ASSERT(value.is_valid());
3855  Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
3856  value.Unuse();
3857  destination()->Split(positive_smi);
3858}
3859
3860
3861void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
3862  ASSERT(args->length() == 1);
3863  Load(args->at(0));
3864  Result value = frame_->Pop();
3865  value.ToRegister();
3866  ASSERT(value.is_valid());
3867  Condition is_smi = masm_->CheckSmi(value.reg());
3868  value.Unuse();
3869  destination()->Split(is_smi);
3870}
3871
3872
3873void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
3874  // Conditionally generate a log call.
3875  // Args:
3876  //   0 (literal string): The type of logging (corresponds to the flags).
3877  //     This is used to determine whether or not to generate the log call.
3878  //   1 (string): Format string.  Access the string at argument index 2
3879  //     with '%2s' (see Logger::LogRuntime for all the formats).
3880  //   2 (array): Arguments to the format string.
3881  ASSERT_EQ(args->length(), 3);
3882#ifdef ENABLE_LOGGING_AND_PROFILING
3883  if (ShouldGenerateLog(args->at(0))) {
3884    Load(args->at(1));
3885    Load(args->at(2));
3886    frame_->CallRuntime(Runtime::kLog, 2);
3887  }
3888#endif
3889  // Finally, we're expected to leave a value on the top of the stack.
3890  frame_->Push(Factory::undefined_value());
3891}
3892
3893
3894void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
3895  ASSERT(args->length() == 2);
3896
3897  // Load the two objects into registers and perform the comparison.
3898  Load(args->at(0));
3899  Load(args->at(1));
3900  Result right = frame_->Pop();
3901  Result left = frame_->Pop();
3902  right.ToRegister();
3903  left.ToRegister();
3904  __ cmpq(right.reg(), left.reg());
3905  right.Unuse();
3906  left.Unuse();
3907  destination()->Split(equal);
3908}
3909
3910
3911void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
3912  ASSERT(args->length() == 0);
3913  // The rbp value is aligned, so its low bits look like a smi tag.  It can
3914  // safely be pushed as if it were a smi, but must not be treated as one.
3915  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
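      // Any 8-byte-aligned value has a zero low bit, which is exactly
      // the smi tag, so the GC will read the pushed value as a smi and
      // leave it alone.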
3916  Result rbp_as_smi = allocator_->Allocate();
3917  ASSERT(rbp_as_smi.is_valid());
3918  __ movq(rbp_as_smi.reg(), rbp);
3919  frame_->Push(&rbp_as_smi);
3920}
3921
3922
3923void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
3924  ASSERT(args->length() == 0);
3925  frame_->SpillAll();
3926  __ push(rsi);
3927
3928  // Make sure the frame is aligned like the OS expects.
3929  static const int kFrameAlignment = OS::ActivationFrameAlignment();
3930  if (kFrameAlignment > 0) {
3931    ASSERT(IsPowerOf2(kFrameAlignment));
3932    __ movq(rbx, rsp);  // Save in AMD-64 abi callee-saved register.
3933    __ and_(rsp, Immediate(-kFrameAlignment));
3934  }
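      // (With kFrameAlignment == 16, for example, the mask -16 clears
      // the low four bits of rsp.)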
3935
3936  // Call V8::RandomPositiveSmi().
3937  __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
3938
3939  // Restore stack pointer from callee-saved register.
3940  if (kFrameAlignment > 0) {
3941    __ movq(rsp, rbx);
3942  }
3943
3944  __ pop(rsi);
3945  Result result = allocator_->Allocate(rax);
3946  frame_->Push(&result);
3947}
3948
3949
3950void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
3951  ASSERT_EQ(args->length(), 4);
3952
3953  // Load the arguments on the stack and call the runtime system.
3954  Load(args->at(0));
3955  Load(args->at(1));
3956  Load(args->at(2));
3957  Load(args->at(3));
3958  Result result = frame_->CallRuntime(Runtime::kRegExpExec, 4);
3959  frame_->Push(&result);
3960}
3961
3962
3963void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
3964  ASSERT_EQ(2, args->length());
3965
3966  Load(args->at(0));
3967  Load(args->at(1));
3968
3969  StringAddStub stub(NO_STRING_ADD_FLAGS);
3970  Result answer = frame_->CallStub(&stub, 2);
3971  frame_->Push(&answer);
3972}
3973
3974
3975void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
3976  ASSERT_EQ(3, args->length());
3977
3978  Load(args->at(0));
3979  Load(args->at(1));
3980  Load(args->at(2));
3981
3982  SubStringStub stub;
3983  Result answer = frame_->CallStub(&stub, 3);
3984  frame_->Push(&answer);
3985}
3986
3987
3988void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
3989  ASSERT_EQ(2, args->length());
3990
3991  Load(args->at(0));
3992  Load(args->at(1));
3993
3994  StringCompareStub stub;
3995  Result answer = frame_->CallStub(&stub, 2);
3996  frame_->Push(&answer);
3997}
3998
3999
4000void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
4001  ASSERT(args->length() == 1);
4002  JumpTarget leave, null, function, non_function_constructor;
4003  Load(args->at(0));  // Load the object.
4004  Result obj = frame_->Pop();
4005  obj.ToRegister();
4006  frame_->Spill(obj.reg());
4007
4008  // If the object is a smi, we return null.
4009  Condition is_smi = masm_->CheckSmi(obj.reg());
4010  null.Branch(is_smi);
4011
4012  // Check that the object is a JS object but take special care of JS
4013  // functions to make sure they have 'Function' as their class.
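      // For example, the result is 'Array' for an array, 'Function' for
      // a JS function, and null for a smi.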
4014
4015  __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
4016  null.Branch(below);
4017
4018  // As long as JS_FUNCTION_TYPE is the last instance type and it is
4019  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4020  // LAST_JS_OBJECT_TYPE.
4021  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4022  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
4023  __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
4024  function.Branch(equal);
4025
4026  // Check if the constructor in the map is a function.
4027  __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
4028  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
4029  non_function_constructor.Branch(not_equal);
4030
4031  // The obj register now contains the constructor function. Grab the
4032  // instance class name from there.
4033  __ movq(obj.reg(),
4034          FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
4035  __ movq(obj.reg(),
4036          FieldOperand(obj.reg(),
4037                       SharedFunctionInfo::kInstanceClassNameOffset));
4038  frame_->Push(&obj);
4039  leave.Jump();
4040
4041  // Functions have class 'Function'.
4042  function.Bind();
4043  frame_->Push(Factory::function_class_symbol());
4044  leave.Jump();
4045
4046  // Objects with a non-function constructor have class 'Object'.
4047  non_function_constructor.Bind();
4048  frame_->Push(Factory::Object_symbol());
4049  leave.Jump();
4050
4051  // Non-JS objects have class null.
4052  null.Bind();
4053  frame_->Push(Factory::null_value());
4054
4055  // All done.
4056  leave.Bind();
4057}
4058
4059
4060void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
4061  ASSERT(args->length() == 2);
4062  JumpTarget leave;
4063  Load(args->at(0));  // Load the object.
4064  Load(args->at(1));  // Load the value.
4065  Result value = frame_->Pop();
4066  Result object = frame_->Pop();
4067  value.ToRegister();
4068  object.ToRegister();
4069
4070  // if (object->IsSmi()) return value.
4071  Condition is_smi = masm_->CheckSmi(object.reg());
4072  leave.Branch(is_smi, &value);
4073
4074  // It is a heap object - get its map.
4075  Result scratch = allocator_->Allocate();
4076  ASSERT(scratch.is_valid());
4077  // if (!object->IsJSValue()) return value.
4078  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
4079  leave.Branch(not_equal, &value);
4080
4081  // Store the value.
4082  __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
4083  // Update the write barrier.  Save the value as it will be
4084  // overwritten by the write barrier code and is needed afterward.
4085  Result duplicate_value = allocator_->Allocate();
4086  ASSERT(duplicate_value.is_valid());
4087  __ movq(duplicate_value.reg(), value.reg());
4088  // The object register is also overwritten by the write barrier and
4089  // possibly aliased in the frame.
4090  frame_->Spill(object.reg());
4091  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
4092                 scratch.reg());
4093  object.Unuse();
4094  scratch.Unuse();
4095  duplicate_value.Unuse();
4096
4097  // Leave.
4098  leave.Bind(&value);
4099  frame_->Push(&value);
4100}
4101
4102
4103void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
4104  ASSERT(args->length() == 1);
4105  JumpTarget leave;
4106  Load(args->at(0));  // Load the object.
4107  frame_->Dup();
4108  Result object = frame_->Pop();
4109  object.ToRegister();
4110  ASSERT(object.is_valid());
4111  // if (object->IsSmi()) return object.
4112  Condition is_smi = masm_->CheckSmi(object.reg());
4113  leave.Branch(is_smi);
4114  // It is a heap object - get map.
4115  Result temp = allocator()->Allocate();
4116  ASSERT(temp.is_valid());
4117  // if (!object->IsJSValue()) return object.
4118  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
4119  leave.Branch(not_equal);
4120  __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
4121  object.Unuse();
4122  frame_->SetElementAt(0, &temp);
4123  leave.Bind();
4124}
4125
4126
4127// -------------------------------------------------------------------------
4128// CodeGenerator implementation of Expressions
4129
4130void CodeGenerator::LoadAndSpill(Expression* expression) {
4131  // TODO(x64): No architecture-specific code. Move to shared location.
4132  ASSERT(in_spilled_code());
4133  set_in_spilled_code(false);
4134  Load(expression);
4135  frame_->SpillAll();
4136  set_in_spilled_code(true);
4137}
4138
4139
4140void CodeGenerator::Load(Expression* expr) {
4141#ifdef DEBUG
4142  int original_height = frame_->height();
4143#endif
4144  ASSERT(!in_spilled_code());
4145  JumpTarget true_target;
4146  JumpTarget false_target;
4147  ControlDestination dest(&true_target, &false_target, true);
4148  LoadCondition(expr, &dest, false);
4149
4150  if (dest.false_was_fall_through()) {
4151    // The false target was just bound.
4152    JumpTarget loaded;
4153    frame_->Push(Factory::false_value());
4154    // There may be dangling jumps to the true target.
4155    if (true_target.is_linked()) {
4156      loaded.Jump();
4157      true_target.Bind();
4158      frame_->Push(Factory::true_value());
4159      loaded.Bind();
4160    }
4161
4162  } else if (dest.is_used()) {
4163    // There is true, and possibly false, control flow (with true as
4164    // the fall through).
4165    JumpTarget loaded;
4166    frame_->Push(Factory::true_value());
4167    if (false_target.is_linked()) {
4168      loaded.Jump();
4169      false_target.Bind();
4170      frame_->Push(Factory::false_value());
4171      loaded.Bind();
4172    }
4173
4174  } else {
4175    // We have a valid value on top of the frame, but we still may
4176    // have dangling jumps to the true and false targets from nested
4177    // subexpressions (eg, the left subexpressions of the
4178    // short-circuited boolean operators).
4179    ASSERT(has_valid_frame());
4180    if (true_target.is_linked() || false_target.is_linked()) {
4181      JumpTarget loaded;
4182      loaded.Jump();  // Don't lose the current TOS.
4183      if (true_target.is_linked()) {
4184        true_target.Bind();
4185        frame_->Push(Factory::true_value());
4186        if (false_target.is_linked()) {
4187          loaded.Jump();
4188        }
4189      }
4190      if (false_target.is_linked()) {
4191        false_target.Bind();
4192        frame_->Push(Factory::false_value());
4193      }
4194      loaded.Bind();
4195    }
4196  }
4197
4198  ASSERT(has_valid_frame());
4199  ASSERT(frame_->height() == original_height + 1);
4200}
4201
4202
4203// Emit code to load the value of an expression to the top of the
4204// frame. If the expression is boolean-valued it may be compiled (or
4205// partially compiled) into control flow to the control destination.
4206// If force_control is true, control flow is forced.
4207void CodeGenerator::LoadCondition(Expression* x,
4208                                  ControlDestination* dest,
4209                                  bool force_control) {
4210  ASSERT(!in_spilled_code());
4211  int original_height = frame_->height();
4212
4213  { CodeGenState new_state(this, dest);
4214    Visit(x);
4215
4216    // If we hit a stack overflow, we may not have actually visited
4217    // the expression.  In that case, we ensure that we have a
4218    // valid-looking frame state because we will continue to generate
4219    // code as we unwind the C++ stack.
4220    //
4221    // It's possible to have both a stack overflow and a valid frame
4222    // state (eg, a subexpression overflowed, visiting it returned
4223    // with a dummied frame state, and visiting this expression
4224    // returned with a normal-looking state).
4225    if (HasStackOverflow() &&
4226        !dest->is_used() &&
4227        frame_->height() == original_height) {
4228      dest->Goto(true);
4229    }
4230  }
4231
4232  if (force_control && !dest->is_used()) {
4233    // Convert the TOS value into flow to the control destination.
4234    // TODO(X64): Make control flow to control destinations work.
4235    ToBoolean(dest);
4236  }
4237
4238  ASSERT(!(force_control && !dest->is_used()));
4239  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
4240}
4241
4242
4243// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
4244// convert it to a boolean in the condition code register or jump to
4245// 'false_target'/'true_target' as appropriate.
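    // For example, ToBoolean(0) and ToBoolean(undefined) are false,
    // while ToBoolean({}) and ToBoolean('0') are true; values the fast
    // checks below cannot decide fall through to the stub call.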
4246void CodeGenerator::ToBoolean(ControlDestination* dest) {
4247  Comment cmnt(masm_, "[ ToBoolean");
4248
4249  // The value to convert should be popped from the frame.
4250  Result value = frame_->Pop();
4251  value.ToRegister();
4252  // Fast case checks.
4253
4254  // 'false' => false.
4255  __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
4256  dest->false_target()->Branch(equal);
4257
4258  // 'true' => true.
4259  __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
4260  dest->true_target()->Branch(equal);
4261
4262  // 'undefined' => false.
4263  __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4264  dest->false_target()->Branch(equal);
4265
4266  // Smi => false iff zero.
4267  __ SmiCompare(value.reg(), Smi::FromInt(0));
4268  dest->false_target()->Branch(equal);
4269  Condition is_smi = masm_->CheckSmi(value.reg());
4270  dest->true_target()->Branch(is_smi);
4271
4272  // Call the stub for all other cases.
4273  frame_->Push(&value);  // Undo the Pop() from above.
4274  ToBooleanStub stub;
4275  Result temp = frame_->CallStub(&stub, 1);
4276  // Convert the result to a condition code.
4277  __ testq(temp.reg(), temp.reg());
4278  temp.Unuse();
4279  dest->Split(not_equal);
4280}
4281
4282
4283void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
4284  UNIMPLEMENTED();
4285  // TODO(X64): Implement security policy for loads of smis.
4286}
4287
4288
4289bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
4290  return false;
4291}
4292
4293// -------------------------------------------------------------------------
4294// CodeGenerator implementation of variables, lookups, and stores.
4295
4296Reference::Reference(CodeGenerator* cgen,
4297                     Expression* expression,
4298                     bool persist_after_get)
4299    : cgen_(cgen),
4300      expression_(expression),
4301      type_(ILLEGAL),
4302      persist_after_get_(persist_after_get) {
4303  cgen->LoadReference(this);
4304}
4305
4306
4307Reference::~Reference() {
4308  ASSERT(is_unloaded() || is_illegal());
4309}
4310
4311
4312void CodeGenerator::LoadReference(Reference* ref) {
4313  // References are loaded from both spilled and unspilled code.  Set the
4314  // state to unspilled to allow that (and explicitly spill after
4315  // construction at the construction sites).
4316  bool was_in_spilled_code = in_spilled_code_;
4317  in_spilled_code_ = false;
4318
4319  Comment cmnt(masm_, "[ LoadReference");
4320  Expression* e = ref->expression();
4321  Property* property = e->AsProperty();
4322  Variable* var = e->AsVariableProxy()->AsVariable();
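      // For example, 'o.x' yields a NAMED reference, 'o[i]' a KEYED
      // one, and a non-global variable a SLOT reference.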
4323
4324  if (property != NULL) {
4325    // The expression is either a property or a variable proxy that rewrites
4326    // to a property.
4327    Load(property->obj());
4328    if (property->key()->IsPropertyName()) {
4329      ref->set_type(Reference::NAMED);
4330    } else {
4331      Load(property->key());
4332      ref->set_type(Reference::KEYED);
4333    }
4334  } else if (var != NULL) {
4335    // The expression is a variable proxy that does not rewrite to a
4336    // property.  Global variables are treated as named property references.
4337    if (var->is_global()) {
4338      LoadGlobal();
4339      ref->set_type(Reference::NAMED);
4340    } else {
4341      ASSERT(var->slot() != NULL);
4342      ref->set_type(Reference::SLOT);
4343    }
4344  } else {
4345    // Anything else is a runtime error.
4346    Load(e);
4347    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
4348  }
4349
4350  in_spilled_code_ = was_in_spilled_code;
4351}
4352
4353
4354void CodeGenerator::UnloadReference(Reference* ref) {
4355  // Pop a reference from the stack while preserving TOS.
4356  Comment cmnt(masm_, "[ UnloadReference");
4357  frame_->Nip(ref->size());
4358  ref->set_unloaded();
4359}
4360
4361
4362Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
4363  // Currently, this assertion will fail if we try to assign to
4364  // a constant variable that is constant because it is read-only
4365  // (such as the variable referring to a named function expression).
4366  // We need to implement assignments to read-only variables.
4367  // Ideally, we should do this during AST generation (by converting
4368  // such assignments into expression statements); however, in general
4369  // we may not be able to make the decision until past AST generation,
4370  // that is, when the entire program is known.
4371  ASSERT(slot != NULL);
4372  int index = slot->index();
4373  switch (slot->type()) {
4374    case Slot::PARAMETER:
4375      return frame_->ParameterAt(index);
4376
4377    case Slot::LOCAL:
4378      return frame_->LocalAt(index);
4379
4380    case Slot::CONTEXT: {
4381      // Follow the context chain if necessary.
4382      ASSERT(!tmp.is(rsi));  // do not overwrite context register
4383      Register context = rsi;
4384      int chain_length = scope()->ContextChainLength(slot->var()->scope());
4385      for (int i = 0; i < chain_length; i++) {
4386        // Load the closure.
4387        // (All contexts, even 'with' contexts, have a closure,
4388        // and it is the same for all contexts inside a function.
4389        // There is no need to go to the function context first.)
4390        __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
4391        // Load the function context (which is the incoming, outer context).
4392        __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
4393        context = tmp;
4394      }
4395      // We may have a 'with' context now. Get the function context.
4396      // (In fact this mov may never be the needed, since the scope analysis
4397      // may not permit a direct context access in this case and thus we are
4398      // always at a function context. However it is safe to dereference be-
4399      // cause the function context of a function context is itself. Before
4400      // deleting this mov we should try to create a counter-example first,
4401      // though...)
4402      __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
4403      return ContextOperand(tmp, index);
4404    }
4405
4406    default:
4407      UNREACHABLE();
4408      return Operand(rsp, 0);
4409  }
4410}
4411
4412
4413Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
4414                                                         Result tmp,
4415                                                         JumpTarget* slow) {
4416  ASSERT(slot->type() == Slot::CONTEXT);
4417  ASSERT(tmp.is_register());
4418  Register context = rsi;
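      // Any eval-introduced binding would live in a context extension
      // object, so finding a non-NULL extension anywhere on the chain
      // forces the slow path.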
4419
4420  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
4421    if (s->num_heap_slots() > 0) {
4422      if (s->calls_eval()) {
4423        // Check that extension is NULL.
4424        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4425                Immediate(0));
4426        slow->Branch(not_equal, not_taken);
4427      }
4428      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4429      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4430      context = tmp.reg();
4431    }
4432  }
4433  // Check that last extension is NULL.
4434  __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
4435  slow->Branch(not_equal, not_taken);
4436  __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
4437  return ContextOperand(tmp.reg(), slot->index());
4438}
4439
4440
4441void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
4442  if (slot->type() == Slot::LOOKUP) {
4443    ASSERT(slot->var()->is_dynamic());
4444
4445    JumpTarget slow;
4446    JumpTarget done;
4447    Result value;
4448
4449    // Generate fast-case code for variables that might be shadowed by
4450    // eval-introduced variables.  Eval is used a lot without
4451    // introducing variables.  In those cases, we do not want to
4452    // perform a runtime call for all variables in the scope
4453    // containing the eval.
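        // JavaScript example: in 'function f(s) { eval(s); return x; }'
        // the load of 'x' must first check whether the eval introduced
        // its own 'x' before taking the fast global path.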
4454    if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
4455      value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
4456      // If there was no control flow to slow, we can exit early.
4457      if (!slow.is_linked()) {
4458        frame_->Push(&value);
4459        return;
4460      }
4461
4462      done.Jump(&value);
4463
4464    } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
4465      Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
4466      // Only generate the fast case for locals that rewrite to slots.
4467      // This rules out argument loads.
4468      if (potential_slot != NULL) {
4469        // Allocate a fresh register to use as a temp in
4470        // ContextSlotOperandCheckExtensions and to hold the result
4471        // value.
4472        value = allocator_->Allocate();
4473        ASSERT(value.is_valid());
4474        __ movq(value.reg(),
4475                ContextSlotOperandCheckExtensions(potential_slot,
4476                                                  value,
4477                                                  &slow));
4478        if (potential_slot->var()->mode() == Variable::CONST) {
4479          __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
4480          done.Branch(not_equal, &value);
4481          __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4482        }
4483        // There is always control flow to slow from
4484        // ContextSlotOperandCheckExtensions so we have to jump around
4485        // it.
4486        done.Jump(&value);
4487      }
4488    }
4489
4490    slow.Bind();
4491    // A runtime call is inevitable.  We eagerly sync frame elements
4492    // to memory so that we can push the arguments directly into place
4493    // on top of the frame.
4494    frame_->SyncRange(0, frame_->element_count() - 1);
4495    frame_->EmitPush(rsi);
4496    __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
4497    frame_->EmitPush(kScratchRegister);
4498    if (typeof_state == INSIDE_TYPEOF) {
4499      value =
4500          frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
4501    } else {
4502      value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
4503    }
4504
4505    done.Bind(&value);
4506    frame_->Push(&value);
4507
4508  } else if (slot->var()->mode() == Variable::CONST) {
4509    // Const slots may contain 'the hole' value (the constant hasn't been
4510    // initialized yet) which needs to be converted into the 'undefined'
4511    // value.
4512    //
4513    // We currently spill the virtual frame because constants use the
4514    // potentially unsafe direct-frame access of SlotOperand.
4515    VirtualFrame::SpilledScope spilled_scope;
4516    Comment cmnt(masm_, "[ Load const");
4517    JumpTarget exit;
4518    __ movq(rcx, SlotOperand(slot, rcx));
4519    __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4520    exit.Branch(not_equal);
4521    __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
4522    exit.Bind();
4523    frame_->EmitPush(rcx);
4524
4525  } else if (slot->type() == Slot::PARAMETER) {
4526    frame_->PushParameterAt(slot->index());
4527
4528  } else if (slot->type() == Slot::LOCAL) {
4529    frame_->PushLocalAt(slot->index());
4530
4531  } else {
4532    // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
4533    // here.
4534    //
4535    // The use of SlotOperand below is safe for an unspilled frame
4536    // because it will always be a context slot.
4537    ASSERT(slot->type() == Slot::CONTEXT);
4538    Result temp = allocator_->Allocate();
4539    ASSERT(temp.is_valid());
4540    __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
4541    frame_->Push(&temp);
4542  }
4543}
4544
4545
4546void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
4547                                                  TypeofState state) {
4548  LoadFromSlot(slot, state);
4549
4550  // Bail out quickly if we're not using lazy arguments allocation.
4551  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
4552
4553  // ... or if the slot isn't a non-parameter arguments slot.
4554  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
4555
4556  // Pop the loaded value from the stack.
4557  Result value = frame_->Pop();
4558
4559  // If the loaded value is a constant, we know whether the arguments
4560  // object has been lazily allocated yet.
4561  if (value.is_constant()) {
4562    if (value.handle()->IsTheHole()) {
4563      Result arguments = StoreArgumentsObject(false);
4564      frame_->Push(&arguments);
4565    } else {
4566      frame_->Push(&value);
4567    }
4568    return;
4569  }
4570
4571  // The loaded value is in a register. If it is the sentinel that
4572  // indicates that we haven't loaded the arguments object yet, we
4573  // need to do it now.
4574  JumpTarget exit;
4575  __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
4576  frame_->Push(&value);
4577  exit.Branch(not_equal);
4578  Result arguments = StoreArgumentsObject(false);
4579  frame_->SetElementAt(0, &arguments);
4580  exit.Bind();
4581}
4582
4583
4584void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
4585  if (slot->type() == Slot::LOOKUP) {
4586    ASSERT(slot->var()->is_dynamic());
4587
4588    // For now, just do a runtime call.  Since the call is inevitable,
4589    // we eagerly sync the virtual frame so we can directly push the
4590    // arguments into place.
4591    frame_->SyncRange(0, frame_->element_count() - 1);
4592
4593    frame_->EmitPush(rsi);
4594    frame_->EmitPush(slot->var()->name());
4595
4596    Result value;
4597    if (init_state == CONST_INIT) {
4598      // Same as the case for a normal store, but ignores attribute
4599      // (e.g. READ_ONLY) of context slot so that we can initialize const
4600      // properties (introduced via eval("const foo = (some expr);")). Also,
4601      // uses the current function context instead of the top context.
4602      //
4603      // Note that we must declare foo upon entry of eval(), via a
4604      // context slot declaration, but we cannot initialize it at the same
4605      // time, because the const declaration may be at the end of the eval
4606      // code (sigh...) and the const variable may have been used before
4607      // (where its value is 'undefined'). Thus, we can only do the
4608      // initialization when we actually encounter the expression and when
4609      // the expression operands are defined and valid, and thus we need the
4610      // split into 2 operations: declaration of the context slot followed
4611      // by initialization.
4612      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
4613    } else {
4614      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
4615    }
4616    // Storing a variable must keep the (new) value on the expression
4617    // stack. This is necessary for compiling chained assignment
4618    // expressions.
4619    frame_->Push(&value);
4620  } else {
4621    ASSERT(!slot->var()->is_dynamic());
4622
4623    JumpTarget exit;
4624    if (init_state == CONST_INIT) {
4625      ASSERT(slot->var()->mode() == Variable::CONST);
4626      // Only the first const initialization must be executed (the slot
4627      // still contains 'the hole' value). When the assignment is executed,
4628      // the code is identical to a normal store (see below).
4629      //
4630      // We spill the frame in the code below because the direct-frame
4631      // access of SlotOperand is potentially unsafe with an unspilled
4632      // frame.
4633      VirtualFrame::SpilledScope spilled_scope;
4634      Comment cmnt(masm_, "[ Init const");
4635      __ movq(rcx, SlotOperand(slot, rcx));
4636      __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
4637      exit.Branch(not_equal);
4638    }
4639
4640    // We must execute the store.  Storing a variable must keep the (new)
4641    // value on the stack. This is necessary for compiling assignment
4642    // expressions.
4643    //
4644    // Note: We will reach here even with slot->var()->mode() ==
4645    // Variable::CONST because of const declarations which will initialize
4646    // consts to 'the hole' value and by doing so, end up calling this code.
4647    if (slot->type() == Slot::PARAMETER) {
4648      frame_->StoreToParameterAt(slot->index());
4649    } else if (slot->type() == Slot::LOCAL) {
4650      frame_->StoreToLocalAt(slot->index());
4651    } else {
4652      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
4653      //
4654      // The use of SlotOperand below is safe for an unspilled frame
4655      // because the slot is a context slot.
4656      ASSERT(slot->type() == Slot::CONTEXT);
4657      frame_->Dup();
4658      Result value = frame_->Pop();
4659      value.ToRegister();
4660      Result start = allocator_->Allocate();
4661      ASSERT(start.is_valid());
4662      __ movq(SlotOperand(slot, start.reg()), value.reg());
4663      // RecordWrite may destroy the value registers.
4664      //
4665      // TODO(204): Avoid actually spilling when the value is not
4666      // needed (probably the common case).
4667      frame_->Spill(value.reg());
4668      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
4669      Result temp = allocator_->Allocate();
4670      ASSERT(temp.is_valid());
4671      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
4672      // The results start, value, and temp are unused by going out of
4673      // scope.
4674    }
4675
4676    exit.Bind();
4677  }
4678}
4679
4680
4681Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
4682    Slot* slot,
4683    TypeofState typeof_state,
4684    JumpTarget* slow) {
4685  // Check that no extension objects have been created by calls to
4686  // eval from the current scope to the global scope.
4687  Register context = rsi;
4688  Result tmp = allocator_->Allocate();
4689  ASSERT(tmp.is_valid());  // All non-reserved registers were available.
4690
4691  Scope* s = scope();
4692  while (s != NULL) {
4693    if (s->num_heap_slots() > 0) {
4694      if (s->calls_eval()) {
4695        // Check that extension is NULL.
4696        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
4697                Immediate(0));
4698        slow->Branch(not_equal, not_taken);
4699      }
4700      // Load next context in chain.
4701      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
4702      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4703      context = tmp.reg();
4704    }
4705    // If no outer scope calls eval, we do not need to check more
4706    // context extensions.  If we have reached an eval scope, we check
4707    // all extensions from this point.
4708    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
4709    s = s->outer_scope();
4710  }
4711
4712  if (s->is_eval_scope()) {
4713    // Walk up the context chain.  There is no frame effect so it is
4714    // safe to use raw labels here.
4715    Label next, fast;
4716    if (!context.is(tmp.reg())) {
4717      __ movq(tmp.reg(), context);
4718    }
4719    // Load map for comparison into register, outside loop.
4720    __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
4721    __ bind(&next);
4722    // Terminate at global context.
4723    __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
4724    __ j(equal, &fast);
4725    // Check that extension is NULL.
4726    __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
4727    slow->Branch(not_equal);
4728    // Load next context in chain.
4729    __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
4730    __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
4731    __ jmp(&next);
4732    __ bind(&fast);
4733  }
4734  tmp.Unuse();
4735
4736  // All extension objects were empty and it is safe to use a global
4737  // load IC call.
4738  LoadGlobal();
4739  frame_->Push(slot->var()->name());
4740  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
4741                         ? RelocInfo::CODE_TARGET
4742                         : RelocInfo::CODE_TARGET_CONTEXT;
4743  Result answer = frame_->CallLoadIC(mode);
4744  // A test rax instruction following the call signals that the inobject
4745  // property case was inlined.  Ensure that there is not a test rax
4746  // instruction here.
4747  masm_->nop();
4748  // Discard the global object. The result is in answer.
4749  frame_->Drop();
4750  return answer;
4751}
4752
4753
4754void CodeGenerator::LoadGlobal() {
4755  if (in_spilled_code()) {
4756    frame_->EmitPush(GlobalObject());
4757  } else {
4758    Result temp = allocator_->Allocate();
4759    __ movq(temp.reg(), GlobalObject());
4760    frame_->Push(&temp);
4761  }
4762}
4763
4764
4765void CodeGenerator::LoadGlobalReceiver() {
4766  Result temp = allocator_->Allocate();
4767  Register reg = temp.reg();
4768  __ movq(reg, GlobalObject());
4769  __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
4770  frame_->Push(&temp);
4771}
4772
4773
4774ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
4775  if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
4776  ASSERT(scope_->arguments_shadow() != NULL);
4777  // We don't want to do lazy arguments allocation for functions that
4778  // have heap-allocated contexts, because it interferes with the
4779  // uninitialized const tracking in the context objects.
4780  return (scope_->num_heap_slots() > 0)
4781      ? EAGER_ARGUMENTS_ALLOCATION
4782      : LAZY_ARGUMENTS_ALLOCATION;
4783}
4784
4785
4786Result CodeGenerator::StoreArgumentsObject(bool initial) {
4787  ArgumentsAllocationMode mode = ArgumentsMode();
4788  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
4789
4790  Comment cmnt(masm_, "[ store arguments object");
4791  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
4792    // When using lazy arguments allocation, we store the hole value
4793    // as a sentinel indicating that the arguments object hasn't been
4794    // allocated yet.
4795    frame_->Push(Factory::the_hole_value());
4796  } else {
4797    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
4798    frame_->PushFunction();
4799    frame_->PushReceiverSlotAddress();
4800    frame_->Push(Smi::FromInt(scope_->num_parameters()));
4801    Result result = frame_->CallStub(&stub, 3);
4802    frame_->Push(&result);
4803  }
4804
4805
4806  Variable* arguments = scope_->arguments()->var();
4807  Variable* shadow = scope_->arguments_shadow()->var();
4808  ASSERT(arguments != NULL && arguments->slot() != NULL);
4809  ASSERT(shadow != NULL && shadow->slot() != NULL);
4810  JumpTarget done;
4811  bool skip_arguments = false;
4812  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
4813    // We have to skip storing into the arguments slot if it has
4814    // already been written to. This can happen if the function
4815    // has a local variable named 'arguments'.
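        // For example (illustrative sketch only):
        //   function f() { var arguments = 42; return arguments; }
        // Here the local shadows the lazily allocated arguments object.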
4816    LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
4817    Result probe = frame_->Pop();
4818    if (probe.is_constant()) {
4819      // We have to skip updating the arguments object if it has been
4820      // assigned a proper value.
4821      skip_arguments = !probe.handle()->IsTheHole();
4822    } else {
4823      __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
4824      probe.Unuse();
4825      done.Branch(not_equal);
4826    }
4827  }
4828  if (!skip_arguments) {
4829    StoreToSlot(arguments->slot(), NOT_CONST_INIT);
4830    if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
4831  }
4832  StoreToSlot(shadow->slot(), NOT_CONST_INIT);
4833  return frame_->Pop();
4834}
4835
4836
4837void CodeGenerator::LoadTypeofExpression(Expression* expr) {
4838  // Special handling of identifiers as subexpressions of typeof.
4839  Variable* variable = expr->AsVariableProxy()->AsVariable();
4840  if (variable != NULL && !variable->is_this() && variable->is_global()) {
4841    // For a global variable we build the property reference
4842    // <global>.<variable> and perform a (regular non-contextual) property
4843    // load to make sure we do not get reference errors.
4844    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
4845    Literal key(variable->name());
4846    Property property(&global, &key, RelocInfo::kNoPosition);
4847    Reference ref(this, &property);
4848    ref.GetValue();
4849  } else if (variable != NULL && variable->slot() != NULL) {
4850    // For a variable that rewrites to a slot, we signal it is the immediate
4851    // subexpression of a typeof.
4852    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
4853  } else {
4854    // Anything else can be handled normally.
4855    Load(expr);
4856  }
4857}
4858
4859
4860void CodeGenerator::Comparison(Condition cc,
4861                               bool strict,
4862                               ControlDestination* dest) {
4863  // Strict only makes sense for equality comparisons.
4864  ASSERT(!strict || cc == equal);
4865
4866  Result left_side;
4867  Result right_side;
4868  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
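      // (E.g. "a > b" is compiled as "b < a": the right operand is on top
      // of the frame, so popping in this order puts it in left_side.)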
4869  if (cc == greater || cc == less_equal) {
4870    cc = ReverseCondition(cc);
4871    left_side = frame_->Pop();
4872    right_side = frame_->Pop();
4873  } else {
4874    right_side = frame_->Pop();
4875    left_side = frame_->Pop();
4876  }
4877  ASSERT(cc == less || cc == equal || cc == greater_equal);
4878
4879  // If either side is a constant smi, optimize the comparison.
4880  bool left_side_constant_smi =
4881      left_side.is_constant() && left_side.handle()->IsSmi();
4882  bool right_side_constant_smi =
4883      right_side.is_constant() && right_side.handle()->IsSmi();
4884  bool left_side_constant_null =
4885      left_side.is_constant() && left_side.handle()->IsNull();
4886  bool right_side_constant_null =
4887      right_side.is_constant() && right_side.handle()->IsNull();
4888
4889  if (left_side_constant_smi || right_side_constant_smi) {
4890    if (left_side_constant_smi && right_side_constant_smi) {
4891      // Trivial case, comparing two constants.
4892      int left_value = Smi::cast(*left_side.handle())->value();
4893      int right_value = Smi::cast(*right_side.handle())->value();
4894      switch (cc) {
4895        case less:
4896          dest->Goto(left_value < right_value);
4897          break;
4898        case equal:
4899          dest->Goto(left_value == right_value);
4900          break;
4901        case greater_equal:
4902          dest->Goto(left_value >= right_value);
4903          break;
4904        default:
4905          UNREACHABLE();
4906      }
4907    } else {  // Only one side is a constant Smi.
4908      // If left side is a constant Smi, reverse the operands.
4909      // Since one side is a constant Smi, conversion order does not matter.
4910      if (left_side_constant_smi) {
4911        Result temp = left_side;
4912        left_side = right_side;
4913        right_side = temp;
4914        cc = ReverseCondition(cc);
4915        // This may reintroduce greater or less_equal as the value of cc.
4916        // CompareStub and the inline code both support all values of cc.
4917      }
4918      // Implement comparison against a constant Smi, inlining the case
4919      // where both sides are Smis.
4920      left_side.ToRegister();
4921
4922      // Here we split control flow to the stub call and inlined cases
4923      // before finally splitting it to the control destination.  We use
4924      // a jump target and branching to duplicate the virtual frame at
4925      // the first split.  We manually handle the off-frame references
4926      // by reconstituting them on the non-fall-through path.
4927      JumpTarget is_smi;
4928      Register left_reg = left_side.reg();
4929      Handle<Object> right_val = right_side.handle();
4930
4931      Condition left_is_smi = masm_->CheckSmi(left_side.reg());
4932      is_smi.Branch(left_is_smi);
4933
4934      // Set up and call the compare stub.
4935      CompareStub stub(cc, strict);
4936      Result result = frame_->CallStub(&stub, &left_side, &right_side);
4937      result.ToRegister();
4938      __ testq(result.reg(), result.reg());
4939      result.Unuse();
4940      dest->true_target()->Branch(cc);
4941      dest->false_target()->Jump();
4942
4943      is_smi.Bind();
4944      left_side = Result(left_reg);
4945      right_side = Result(right_val);
4946      // Test smi equality and comparison by signed int comparison.
4947      // Both sides are smis, so we can use an Immediate.
4948      __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
4949      left_side.Unuse();
4950      right_side.Unuse();
4951      dest->Split(cc);
4952    }
4953  } else if (cc == equal &&
4954             (left_side_constant_null || right_side_constant_null)) {
4955    // To make null checks efficient, we check if either the left side or
4956    // the right side is the constant 'null'.
4957    // If so, we optimize the code by inlining a null check instead of
4958    // calling the (very) general runtime routine for checking equality.
4959    Result operand = left_side_constant_null ? right_side : left_side;
4960    right_side.Unuse();
4961    left_side.Unuse();
4962    operand.ToRegister();
4963    __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
4964    if (strict) {
4965      operand.Unuse();
4966      dest->Split(equal);
4967    } else {
4968      // The 'null' value is only equal to 'undefined' if using non-strict
4969      // comparisons.
4970      dest->true_target()->Branch(equal);
4971      __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
4972      dest->true_target()->Branch(equal);
4973      Condition is_smi = masm_->CheckSmi(operand.reg());
4974      dest->false_target()->Branch(is_smi);
4975
4976      // It can be an undetectable object.
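          // (E.g. host objects such as document.all are marked
          // undetectable and compare equal to null and undefined.)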
4977      // Use a scratch register in preference to spilling operand.reg().
4978      Result temp = allocator()->Allocate();
4979      ASSERT(temp.is_valid());
4980      __ movq(temp.reg(),
4981              FieldOperand(operand.reg(), HeapObject::kMapOffset));
4982      __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
4983               Immediate(1 << Map::kIsUndetectable));
4984      temp.Unuse();
4985      operand.Unuse();
4986      dest->Split(not_zero);
4987    }
4988  } else {  // Neither side is a constant Smi or null.
4989    // If either side is a non-smi constant, skip the smi check.
4990    bool known_non_smi =
4991        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
4992        (right_side.is_constant() && !right_side.handle()->IsSmi());
4993    left_side.ToRegister();
4994    right_side.ToRegister();
4995
4996    if (known_non_smi) {
4997      // When non-smi, call out to the compare stub.
4998      CompareStub stub(cc, strict);
4999      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5000      // The result is a Smi, which is negative, zero, or positive.
5001      __ SmiTest(answer.reg());  // Sets both zero and sign flags.
5002      answer.Unuse();
5003      dest->Split(cc);
5004    } else {
5005      // Here we split control flow to the stub call and inlined cases
5006      // before finally splitting it to the control destination.  We use
5007      // a jump target and branching to duplicate the virtual frame at
5008      // the first split.  We manually handle the off-frame references
5009      // by reconstituting them on the non-fall-through path.
5010      JumpTarget is_smi;
5011      Register left_reg = left_side.reg();
5012      Register right_reg = right_side.reg();
5013
5014      Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
5015      is_smi.Branch(both_smi);
5016      // When non-smi, call out to the compare stub.
5017      CompareStub stub(cc, strict);
5018      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5019      __ SmiTest(answer.reg());  // Sets both zero and sign flags.
5020      answer.Unuse();
5021      dest->true_target()->Branch(cc);
5022      dest->false_target()->Jump();
5023
5024      is_smi.Bind();
5025      left_side = Result(left_reg);
5026      right_side = Result(right_reg);
5027      __ SmiCompare(left_side.reg(), right_side.reg());
5028      right_side.Unuse();
5029      left_side.Unuse();
5030      dest->Split(cc);
5031    }
5032  }
5033}
5034
5035
5036class DeferredInlineBinaryOperation: public DeferredCode {
5037 public:
5038  DeferredInlineBinaryOperation(Token::Value op,
5039                                Register dst,
5040                                Register left,
5041                                Register right,
5042                                OverwriteMode mode)
5043      : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
5044    set_comment("[ DeferredInlineBinaryOperation");
5045  }
5046
5047  virtual void Generate();
5048
5049 private:
5050  Token::Value op_;
5051  Register dst_;
5052  Register left_;
5053  Register right_;
5054  OverwriteMode mode_;
5055};
5056
5057
5058void DeferredInlineBinaryOperation::Generate() {
5059  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
5060  stub.GenerateCall(masm_, left_, right_);
5061  if (!dst_.is(rax)) __ movq(dst_, rax);
5062}
5063
5064
5065void CodeGenerator::GenericBinaryOperation(Token::Value op,
5066                                           StaticType* type,
5067                                           OverwriteMode overwrite_mode) {
5068  Comment cmnt(masm_, "[ BinaryOperation");
5069  Comment cmnt_token(masm_, Token::String(op));
5070
5071  if (op == Token::COMMA) {
5072    // Simply discard left value.
5073    frame_->Nip(1);
5074    return;
5075  }
5076
5077  Result right = frame_->Pop();
5078  Result left = frame_->Pop();
5079
5080  if (op == Token::ADD) {
5081    bool left_is_string = left.is_constant() && left.handle()->IsString();
5082    bool right_is_string = right.is_constant() && right.handle()->IsString();
5083    if (left_is_string || right_is_string) {
5084      frame_->Push(&left);
5085      frame_->Push(&right);
5086      Result answer;
5087      if (left_is_string) {
5088        if (right_is_string) {
5089          // TODO(lrn): if both are constant strings
5090          // -- do a compile-time cons, if allocation during codegen is allowed.
5091          answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
5092        } else {
5093          answer =
5094            frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
5095        }
5096      } else if (right_is_string) {
5097        answer =
5098          frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
5099      }
5100      frame_->Push(&answer);
5101      return;
5102    }
5103    // Neither operand is known to be a string.
5104  }
5105
5106  bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
5107  bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
5108  bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
5109  bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
5110
5111  if (left_is_smi && right_is_smi) {
5112    // Compute the constant result at compile time, and leave it on the frame.
5113    int left_int = Smi::cast(*left.handle())->value();
5114    int right_int = Smi::cast(*right.handle())->value();
5115    if (FoldConstantSmis(op, left_int, right_int)) return;
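        // (FoldConstantSmis presumably returns false when the result is
        // not a smi, e.g. on overflow, in which case we fall through to
        // the generic code below.)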
5116  }
5117
5118  Result answer;
5119  if (left_is_non_smi || right_is_non_smi) {
5120    // Go straight to the slow case, with no smi code.
5121    frame_->Push(&left);
5122    frame_->Push(&right);
5123    GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
5124    answer = frame_->CallStub(&stub, 2);
5125  } else if (right_is_smi) {
5126    answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
5127                                        type, false, overwrite_mode);
5128  } else if (left_is_smi) {
5129    answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
5130                                        type, true, overwrite_mode);
5131  } else {
5132    // Set the flags based on the operation, type and loop nesting level.
5133    // Bit operations are always assumed to likely operate on smis, while
5134    // other operations rely on the static type information.  In either
5135    // case the inline smi check code is only generated if the operation
5136    // is part of a loop.
5137    if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
5138      answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
5139    } else {
5140      frame_->Push(&left);
5141      frame_->Push(&right);
5142      GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
5143      answer = frame_->CallStub(&stub, 2);
5144    }
5145  }
5146  frame_->Push(&answer);
5147}
5148
5149
5150// Emit a LoadIC call to get the value from receiver and leave it in
5151// dst.  The receiver register is restored after the call.
5152class DeferredReferenceGetNamedValue: public DeferredCode {
5153 public:
5154  DeferredReferenceGetNamedValue(Register dst,
5155                                 Register receiver,
5156                                 Handle<String> name)
5157      : dst_(dst), receiver_(receiver), name_(name) {
5158    set_comment("[ DeferredReferenceGetNamedValue");
5159  }
5160
5161  virtual void Generate();
5162
5163  Label* patch_site() { return &patch_site_; }
5164
5165 private:
5166  Label patch_site_;
5167  Register dst_;
5168  Register receiver_;
5169  Handle<String> name_;
5170};
5171
5172
5173void DeferredReferenceGetNamedValue::Generate() {
5174  __ push(receiver_);
5175  __ Move(rcx, name_);
5176  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
5177  __ Call(ic, RelocInfo::CODE_TARGET);
5178  // The call must be followed by a test rax instruction to indicate
5179  // that the inobject property case was inlined.
5180  //
5181  // Store the delta to the map check instruction here in the test
5182  // instruction.  Use masm_-> instead of the __ macro since the
5183  // latter can't return a value.
5184  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
5185  // Here we use masm_-> instead of the __ macro because this is the
5186  // instruction that gets patched and coverage code gets in the way.
5187  masm_->testl(rax, Immediate(-delta_to_patch_site));
5188  __ IncrementCounter(&Counters::named_load_inline_miss, 1);
5189
5190  if (!dst_.is(rax)) __ movq(dst_, rax);
5191  __ pop(receiver_);
5192}
5193
5194
5195void DeferredInlineSmiAdd::Generate() {
5196  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
5197  igostub.GenerateCall(masm_, dst_, value_);
5198  if (!dst_.is(rax)) __ movq(dst_, rax);
5199}
5200
5201
5202void DeferredInlineSmiAddReversed::Generate() {
5203  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
5204  igostub.GenerateCall(masm_, value_, dst_);
5205  if (!dst_.is(rax)) __ movq(dst_, rax);
5206}
5207
5208
5209void DeferredInlineSmiSub::Generate() {
5210  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
5211  igostub.GenerateCall(masm_, dst_, value_);
5212  if (!dst_.is(rax)) __ movq(dst_, rax);
5213}
5214
5215
5216void DeferredInlineSmiOperation::Generate() {
5217  // For mod we don't generate all the Smi code inline.
5218  GenericBinaryOpStub stub(
5219      op_,
5220      overwrite_mode_,
5221      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
5222  stub.GenerateCall(masm_, src_, value_);
5223  if (!dst_.is(rax)) __ movq(dst_, rax);
5224}
5225
5226
5227Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
5228                                                 Result* operand,
5229                                                 Handle<Object> value,
5230                                                 StaticType* type,
5231                                                 bool reversed,
5232                                                 OverwriteMode overwrite_mode) {
5233  // NOTE: This is an attempt to inline (a bit) more of the code for
5234  // some possible smi operations (like + and -) when (at least) one
5235  // of the operands is a constant smi.
5236  // Consumes the argument "operand".
5237
5238  // TODO(199): Optimize some special cases of operations involving a
5239  // smi literal (multiply by 2, shift by 0, etc.).
5240  if (IsUnsafeSmi(value)) {
5241    Result unsafe_operand(value);
5242    if (reversed) {
5243      return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
5244                               overwrite_mode);
5245    } else {
5246      return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
5247                               overwrite_mode);
5248    }
5249  }
5250
5251  // Get the literal value.
5252  Smi* smi_value = Smi::cast(*value);
5253  int int_value = smi_value->value();
5254
5255  Result answer;
5256  switch (op) {
5257    case Token::ADD: {
5258      operand->ToRegister();
5259      frame_->Spill(operand->reg());
5260      DeferredCode* deferred = NULL;
5261      if (reversed) {
5262        deferred = new DeferredInlineSmiAddReversed(operand->reg(),
5263                                                    smi_value,
5264                                                    overwrite_mode);
5265      } else {
5266        deferred = new DeferredInlineSmiAdd(operand->reg(),
5267                                            smi_value,
5268                                            overwrite_mode);
5269      }
5270      __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5271      __ SmiAddConstant(operand->reg(),
5272                        operand->reg(),
5273                        smi_value,
5274                        deferred->entry_label());
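          // (If the addition overflows, SmiAddConstant jumps to the
          // deferred code, which redoes the operation via
          // GenericBinaryOpStub; see DeferredInlineSmiAdd::Generate.)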
5275      deferred->BindExit();
5276      answer = *operand;
5277      break;
5278    }
5279
5280    case Token::SUB: {
5281      if (reversed) {
5282        Result constant_operand(value);
5283        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
5284                                          overwrite_mode);
5285      } else {
5286        operand->ToRegister();
5287        frame_->Spill(operand->reg());
5288        DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
5289                                                          smi_value,
5290                                                          overwrite_mode);
5291        __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5292        // A smi currently fits in a 32-bit Immediate.
5293        __ SmiSubConstant(operand->reg(),
5294                          operand->reg(),
5295                          smi_value,
5296                          deferred->entry_label());
5297        deferred->BindExit();
5298        answer = *operand;
5299      }
5300      break;
5301    }
5302
5303    case Token::SAR:
5304      if (reversed) {
5305        Result constant_operand(value);
5306        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
5307                                          overwrite_mode);
5308      } else {
5309        // Only the least significant 5 bits of the shift value are used.
5310        // In the slow case, this masking is done inside the runtime call.
5311        int shift_value = int_value & 0x1f;
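            // (ECMAScript reduces shift counts modulo 32, so e.g.
            // x >> 33 behaves like x >> 1.)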
5312        operand->ToRegister();
5313        frame_->Spill(operand->reg());
5314        DeferredInlineSmiOperation* deferred =
5315            new DeferredInlineSmiOperation(op,
5316                                           operand->reg(),
5317                                           operand->reg(),
5318                                           smi_value,
5319                                           overwrite_mode);
5320        __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5321        __ SmiShiftArithmeticRightConstant(operand->reg(),
5322                                           operand->reg(),
5323                                           shift_value);
5324        deferred->BindExit();
5325        answer = *operand;
5326      }
5327      break;
5328
5329    case Token::SHR:
5330      if (reversed) {
5331        Result constant_operand(value);
5332        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
5333                                          overwrite_mode);
5334      } else {
5335        // Only the least significant 5 bits of the shift value are used.
5336        // In the slow case, this masking is done inside the runtime call.
5337        int shift_value = int_value & 0x1f;
5338        operand->ToRegister();
5339        answer = allocator()->Allocate();
5340        ASSERT(answer.is_valid());
5341        DeferredInlineSmiOperation* deferred =
5342            new DeferredInlineSmiOperation(op,
5343                                           answer.reg(),
5344                                           operand->reg(),
5345                                           smi_value,
5346                                           overwrite_mode);
5347        __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5348        __ SmiShiftLogicalRightConstant(answer.reg(),
5349                                        operand->reg(),
5350                                        shift_value,
5351                                        deferred->entry_label());
5352        deferred->BindExit();
5353        operand->Unuse();
5354      }
5355      break;
5356
5357    case Token::SHL:
5358      if (reversed) {
5359        Result constant_operand(value);
5360        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
5361                                          overwrite_mode);
5362      } else {
5363        // Only the least significant 5 bits of the shift value are used.
5364        // In the slow case, this masking is done inside the runtime call.
5365        int shift_value = int_value & 0x1f;
5366        operand->ToRegister();
5367        if (shift_value == 0) {
5368          // Spill operand so it can be overwritten in the slow case.
5369          frame_->Spill(operand->reg());
5370          DeferredInlineSmiOperation* deferred =
5371              new DeferredInlineSmiOperation(op,
5372                                             operand->reg(),
5373                                             operand->reg(),
5374                                             smi_value,
5375                                             overwrite_mode);
5376          __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5377          deferred->BindExit();
5378          answer = *operand;
5379        } else {
5380          // Use a fresh temporary for nonzero shift values.
5381          answer = allocator()->Allocate();
5382          ASSERT(answer.is_valid());
5383          DeferredInlineSmiOperation* deferred =
5384              new DeferredInlineSmiOperation(op,
5385                                             answer.reg(),
5386                                             operand->reg(),
5387                                             smi_value,
5388                                             overwrite_mode);
5389          __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5390          __ SmiShiftLeftConstant(answer.reg(),
5391                                  operand->reg(),
5392                                  shift_value,
5393                                  deferred->entry_label());
5394          deferred->BindExit();
5395          operand->Unuse();
5396        }
5397      }
5398      break;
5399
5400    case Token::BIT_OR:
5401    case Token::BIT_XOR:
5402    case Token::BIT_AND: {
5403      operand->ToRegister();
5404      frame_->Spill(operand->reg());
5405      if (reversed) {
5406        // Bit operations with a constant smi are commutative.
5407        // We can swap left and right operands with no problem.
5408        // Swap left and right overwrite modes.  0->0, 1->2, 2->1.
5409        overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
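            // (Assuming NO_OVERWRITE == 0, OVERWRITE_LEFT == 1 and
            // OVERWRITE_RIGHT == 2, the map (2 * m) % 3 swaps 1 and 2
            // while fixing 0.)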
5410      }
5411      DeferredCode* deferred =  new DeferredInlineSmiOperation(op,
5412                                                               operand->reg(),
5413                                                               operand->reg(),
5414                                                               smi_value,
5415                                                               overwrite_mode);
5416      __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5417      if (op == Token::BIT_AND) {
5418        __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
5419      } else if (op == Token::BIT_XOR) {
5420        if (int_value != 0) {
5421          __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
5422        }
5423      } else {
5424        ASSERT(op == Token::BIT_OR);
5425        if (int_value != 0) {
5426          __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
5427        }
5428      }
5429      deferred->BindExit();
5430      answer = *operand;
5431      break;
5432    }
5433
5434    // Generate inline code for mod of powers of 2 and negative powers of 2.
5435    case Token::MOD:
5436      if (!reversed &&
5437          int_value != 0 &&
5438          (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
5439        operand->ToRegister();
5440        frame_->Spill(operand->reg());
5441        DeferredCode* deferred =
5442            new DeferredInlineSmiOperation(op,
5443                                           operand->reg(),
5444                                           operand->reg(),
5445                                           smi_value,
5446                                           overwrite_mode);
5447        // Check for negative or non-Smi left hand side.
5448        __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
5449        if (int_value < 0) int_value = -int_value;
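            // (For a non-negative smi x and a power of two m,
            // x % m == x & (m - 1); a non-negative remainder is not
            // affected by the divisor's sign, so |int_value| works.)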
5450        if (int_value == 1) {
5451          __ Move(operand->reg(), Smi::FromInt(0));
5452        } else {
5453          __ SmiAndConstant(operand->reg(),
5454                            operand->reg(),
5455                            Smi::FromInt(int_value - 1));
5456        }
5457        deferred->BindExit();
5458        answer = *operand;
5459        break;  // This break only applies if we generated code for MOD.
5460      }
5461      // Fall through if we did not find a power of 2 on the right hand side!
5462      // The next case must be the default.
5463
5464    default: {
5465      Result constant_operand(value);
5466      if (reversed) {
5467        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
5468                                          overwrite_mode);
5469      } else {
5470        answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
5471                                          overwrite_mode);
5472      }
5473      break;
5474    }
5475  }
5476  ASSERT(answer.is_valid());
5477  return answer;
5478}
5479
5480Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
5481                                               Result* left,
5482                                               Result* right,
5483                                               OverwriteMode overwrite_mode) {
5484  Result answer;
5485  // Special handling of div and mod because they use fixed registers.
5486  if (op == Token::DIV || op == Token::MOD) {
5487    // We need rax as the quotient register, rdx as the remainder
5488    // register, neither left nor right in rax or rdx, and left copied
5489    // to rax.
5490    Result quotient;
5491    Result remainder;
5492    bool left_is_in_rax = false;
5493    // Step 1: get rax for quotient.
5494    if ((left->is_register() && left->reg().is(rax)) ||
5495        (right->is_register() && right->reg().is(rax))) {
5496      // At least one of them is in rax.  Use a fresh non-rdx
5497      // register for them.
5498      Result fresh = allocator_->Allocate();
5499      ASSERT(fresh.is_valid());
5500      if (fresh.reg().is(rdx)) {
5501        remainder = fresh;
5502        fresh = allocator_->Allocate();
5503        ASSERT(fresh.is_valid());
5504      }
5505      if (left->is_register() && left->reg().is(rax)) {
5506        quotient = *left;
5507        *left = fresh;
5508        left_is_in_rax = true;
5509      }
5510      if (right->is_register() && right->reg().is(rax)) {
5511        quotient = *right;
5512        *right = fresh;
5513      }
5514      __ movq(fresh.reg(), rax);
5515    } else {
5516      // Neither left nor right is in rax.
5517      quotient = allocator_->Allocate(rax);
5518    }
5519    ASSERT(quotient.is_register() && quotient.reg().is(rax));
5520    ASSERT(!(left->is_register() && left->reg().is(rax)));
5521    ASSERT(!(right->is_register() && right->reg().is(rax)));
5522
5523    // Step 2: get rdx for remainder if necessary.
5524    if (!remainder.is_valid()) {
5525      if ((left->is_register() && left->reg().is(rdx)) ||
5526          (right->is_register() && right->reg().is(rdx))) {
5527        Result fresh = allocator_->Allocate();
5528        ASSERT(fresh.is_valid());
5529        if (left->is_register() && left->reg().is(rdx)) {
5530          remainder = *left;
5531          *left = fresh;
5532        }
5533        if (right->is_register() && right->reg().is(rdx)) {
5534          remainder = *right;
5535          *right = fresh;
5536        }
5537        __ movq(fresh.reg(), rdx);
5538      } else {
5539        // Neither left nor right is in rdx.
5540        remainder = allocator_->Allocate(rdx);
5541      }
5542    }
5543    ASSERT(remainder.is_register() && remainder.reg().is(rdx));
5544    ASSERT(!(left->is_register() && left->reg().is(rdx)));
5545    ASSERT(!(right->is_register() && right->reg().is(rdx)));
5546
5547    left->ToRegister();
5548    right->ToRegister();
5549    frame_->Spill(rax);
5550    frame_->Spill(rdx);
5551
5552    // Check that left and right are smi tagged.
5553    DeferredInlineBinaryOperation* deferred =
5554        new DeferredInlineBinaryOperation(op,
5555                                          (op == Token::DIV) ? rax : rdx,
5556                                          left->reg(),
5557                                          right->reg(),
5558                                          overwrite_mode);
5559    __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5560
5561    if (op == Token::DIV) {
5562      __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
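          // (SmiDiv is expected to bail out to the deferred stub when
          // the quotient is not a smi, e.g. division by zero, inexact
          // division, or a negative zero result.)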
5563      deferred->BindExit();
5564      left->Unuse();
5565      right->Unuse();
5566      answer = quotient;
5567    } else {
5568      ASSERT(op == Token::MOD);
5569      __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
5570      deferred->BindExit();
5571      left->Unuse();
5572      right->Unuse();
5573      answer = remainder;
5574    }
5575    ASSERT(answer.is_valid());
5576    return answer;
5577  }
5578
5579  // Special handling of shift operations because they use fixed
5580  // registers.
5581  if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
5582    // Move left out of rcx if necessary.
5583    if (left->is_register() && left->reg().is(rcx)) {
5584      *left = allocator_->Allocate();
5585      ASSERT(left->is_valid());
5586      __ movq(left->reg(), rcx);
5587    }
5588    right->ToRegister(rcx);
5589    left->ToRegister();
5590    ASSERT(left->is_register() && !left->reg().is(rcx));
5591    ASSERT(right->is_register() && right->reg().is(rcx));
5592
5593    // We will modify right, so it must be spilled.
5594    frame_->Spill(rcx);
5595
5596    // Use a fresh answer register to avoid spilling the left operand.
5597    answer = allocator_->Allocate();
5598    ASSERT(answer.is_valid());
5599    // Check that both operands are smis using the answer register as a
5600    // temporary.
5601    DeferredInlineBinaryOperation* deferred =
5602        new DeferredInlineBinaryOperation(op,
5603                                          answer.reg(),
5604                                          left->reg(),
5605                                          rcx,
5606                                          overwrite_mode);
5607    __ movq(answer.reg(), left->reg());
5608    __ or_(answer.reg(), rcx);
5609    __ JumpIfNotSmi(answer.reg(), deferred->entry_label());
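        // (Because the smi tag is zero, the or-ed word has a clear tag
        // bit only if both operands do, so a single smi check covers
        // both registers.)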
5610
5611    // Perform the operation.
5612    switch (op) {
5613      case Token::SAR:
5614        __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
5615        break;
5616      case Token::SHR: {
5617        __ SmiShiftLogicalRight(answer.reg(),
5618                                left->reg(),
5619                                rcx,
5620                                deferred->entry_label());
5621        break;
5622      }
5623      case Token::SHL: {
5624        __ SmiShiftLeft(answer.reg(),
5625                        left->reg(),
5626                        rcx,
5627                        deferred->entry_label());
5628        break;
5629      }
5630      default:
5631        UNREACHABLE();
5632    }
5633    deferred->BindExit();
5634    left->Unuse();
5635    right->Unuse();
5636    ASSERT(answer.is_valid());
5637    return answer;
5638  }
5639
5640  // Handle the other binary operations.
5641  left->ToRegister();
5642  right->ToRegister();
5643  // A newly allocated register is used to hold the answer.  The
5644  // registers containing left and right are not modified so they don't
5645  // need to be spilled in the fast case.
5646  answer = allocator_->Allocate();
5647  ASSERT(answer.is_valid());
5648
5649  // Perform the smi tag check.
5650  DeferredInlineBinaryOperation* deferred =
5651      new DeferredInlineBinaryOperation(op,
5652                                        answer.reg(),
5653                                        left->reg(),
5654                                        right->reg(),
5655                                        overwrite_mode);
5656  __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5657
5658  switch (op) {
5659    case Token::ADD:
5660      __ SmiAdd(answer.reg(),
5661                left->reg(),
5662                right->reg(),
5663                deferred->entry_label());
5664      break;
5665
5666    case Token::SUB:
5667      __ SmiSub(answer.reg(),
5668                left->reg(),
5669                right->reg(),
5670                deferred->entry_label());
5671      break;
5672
5673    case Token::MUL: {
5674      __ SmiMul(answer.reg(),
5675                left->reg(),
5676                right->reg(),
5677                deferred->entry_label());
5678      break;
5679    }
5680
5681    case Token::BIT_OR:
5682      __ SmiOr(answer.reg(), left->reg(), right->reg());
5683      break;
5684
5685    case Token::BIT_AND:
5686      __ SmiAnd(answer.reg(), left->reg(), right->reg());
5687      break;
5688
5689    case Token::BIT_XOR:
5690      __ SmiXor(answer.reg(), left->reg(), right->reg());
5691      break;
5692
5693    default:
5694      UNREACHABLE();
5695      break;
5696  }
5697  deferred->BindExit();
5698  left->Unuse();
5699  right->Unuse();
5700  ASSERT(answer.is_valid());
5701  return answer;
5702}
5703
5704
5705Result CodeGenerator::EmitKeyedLoad(bool is_global) {
5706  Comment cmnt(masm_, "[ Load from keyed Property");
5707  // Inline array load code if inside of a loop.  We do not know
5708  // the receiver map yet, so we initially generate the code with
5709  // a check against an invalid map.  In the inline cache code, we
5710  // patch the map check if appropriate.
5711  if (loop_nesting() > 0) {
5712    Comment cmnt(masm_, "[ Inlined load from keyed Property");
5713
5714    Result key = frame_->Pop();
5715    Result receiver = frame_->Pop();
5716    key.ToRegister();
5717    receiver.ToRegister();
5718
5719    // Use a fresh temporary to load the elements without destroying
5720    // the receiver which is needed for the deferred slow case.
5721    Result elements = allocator()->Allocate();
5722    ASSERT(elements.is_valid());
5723
5724    // Use a fresh temporary for the index and later the loaded
5725    // value.
5726    Result index = allocator()->Allocate();
5727    ASSERT(index.is_valid());
5728
5729    DeferredReferenceGetKeyedValue* deferred =
5730        new DeferredReferenceGetKeyedValue(index.reg(),
5731                                           receiver.reg(),
5732                                           key.reg(),
5733                                           is_global);
5734
5735    // Check that the receiver is not a smi (only needed if this
5736    // is not a load from the global context) and that it has the
5737    // expected map.
5738    if (!is_global) {
5739      __ JumpIfSmi(receiver.reg(), deferred->entry_label());
5740    }
5741
5742    // Initially, use an invalid map. The map is patched in the IC
5743    // initialization code.
5744    __ bind(deferred->patch_site());
5745    // Use masm-> here instead of the double underscore macro since extra
5746    // coverage code can interfere with the patching.  Do not use the
5747    // root array to load null_value, since it must be patched with
5748    // the expected receiver map.
5749    masm_->movq(kScratchRegister, Factory::null_value(),
5750                RelocInfo::EMBEDDED_OBJECT);
5751    masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5752                kScratchRegister);
5753    deferred->Branch(not_equal);
5754
5755    // Check that the key is a non-negative smi.
5756    __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
5757
5758    // Get the elements array from the receiver and check that it
5759    // is not a dictionary.
5760    __ movq(elements.reg(),
5761            FieldOperand(receiver.reg(), JSObject::kElementsOffset));
5762    __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
5763           Factory::fixed_array_map());
5764    deferred->Branch(not_equal);
5765
5766    // Shift the key to get the actual index value and check that
5767    // it is within bounds.
5768    __ SmiToInteger32(index.reg(), key.reg());
5769    __ cmpl(index.reg(),
5770            FieldOperand(elements.reg(), FixedArray::kLengthOffset));
5771    deferred->Branch(above_equal);
5772
5773    // The index register holds the un-smi-tagged key. It has been
5774    // zero-extended to 64 bits, so it can be used directly as an
5775    // index in the operand below.
5776    // Load and check that the result is not the hole.  We could
5777    // reuse the index or elements register for the value.
5778    //
5779    // TODO(206): Consider whether it makes sense to try some
5780    // heuristic about which register to reuse.  For example, if
5781    // one is rax, then we can reuse that one because the value
5782    // coming from the deferred code will be in rax.
5783    Result value = index;
5784    __ movq(value.reg(),
5785            Operand(elements.reg(),
5786                    index.reg(),
5787                    times_pointer_size,
5788                    FixedArray::kHeaderSize - kHeapObjectTag));
5789    elements.Unuse();
5790    index.Unuse();
5791    __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
5792    deferred->Branch(equal);
5793    __ IncrementCounter(&Counters::keyed_load_inline, 1);
5794
5795    deferred->BindExit();
5796    // Restore the receiver and key to the frame and push the
5797    // result on top of it.
5798    frame_->Push(&receiver);
5799    frame_->Push(&key);
5800    return value;
5801
5802  } else {
5803    Comment cmnt(masm_, "[ Load from keyed Property");
5804    RelocInfo::Mode mode = is_global
5805        ? RelocInfo::CODE_TARGET_CONTEXT
5806        : RelocInfo::CODE_TARGET;
5807    Result answer = frame_->CallKeyedLoadIC(mode);
5808    // Make sure that we do not have a test instruction after the
5809    // call.  A test instruction after the call is used to
5810    // indicate that we have generated an inline version of the
5811    // keyed load.  The explicit nop instruction is here because
5812    // the push that follows might be peep-hole optimized away.
5813    __ nop();
5814    return answer;
5815  }
5816}
5817
5818
5819#undef __
5820#define __ ACCESS_MASM(masm)
5821
5822
5823Handle<String> Reference::GetName() {
5824  ASSERT(type_ == NAMED);
5825  Property* property = expression_->AsProperty();
5826  if (property == NULL) {
5827    // Global variable reference treated as a named property reference.
5828    VariableProxy* proxy = expression_->AsVariableProxy();
5829    ASSERT(proxy->AsVariable() != NULL);
5830    ASSERT(proxy->AsVariable()->is_global());
5831    return proxy->name();
5832  } else {
5833    Literal* raw_name = property->key()->AsLiteral();
5834    ASSERT(raw_name != NULL);
5835    return Handle<String>(String::cast(*raw_name->handle()));
5836  }
5837}
5838
5839
5840void Reference::GetValue() {
5841  ASSERT(!cgen_->in_spilled_code());
5842  ASSERT(cgen_->HasValidEntryRegisters());
5843  ASSERT(!is_illegal());
5844  MacroAssembler* masm = cgen_->masm();
5845
5846  // Record the source position for the property load.
5847  Property* property = expression_->AsProperty();
5848  if (property != NULL) {
5849    cgen_->CodeForSourcePosition(property->position());
5850  }
5851
5852  switch (type_) {
5853    case SLOT: {
5854      Comment cmnt(masm, "[ Load from Slot");
5855      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
5856      ASSERT(slot != NULL);
5857      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
5858      break;
5859    }
5860
5861    case NAMED: {
5862      Variable* var = expression_->AsVariableProxy()->AsVariable();
5863      bool is_global = var != NULL;
5864      ASSERT(!is_global || var->is_global());
5865
5866      // Do not inline the inobject property case for loads from the global
5867      // object.  Also do not inline for unoptimized code.  This saves time
5868      // in the code generator.  Unoptimized code is top-level code or code
5869      // that is not in a loop.
5870      if (is_global ||
5871          cgen_->scope()->is_global_scope() ||
5872          cgen_->loop_nesting() == 0) {
5873        Comment cmnt(masm, "[ Load from named Property");
5874        cgen_->frame()->Push(GetName());
5875
5876        RelocInfo::Mode mode = is_global
5877                               ? RelocInfo::CODE_TARGET_CONTEXT
5878                               : RelocInfo::CODE_TARGET;
5879        Result answer = cgen_->frame()->CallLoadIC(mode);
5880        // A test rax instruction following the call signals that the
5881        // inobject property case was inlined.  Ensure that there is not
5882        // a test rax instruction here.
5883        __ nop();
5884        cgen_->frame()->Push(&answer);
5885      } else {
5886        // Inline the inobject property case.
5887        Comment cmnt(masm, "[ Inlined named property load");
5888        Result receiver = cgen_->frame()->Pop();
5889        receiver.ToRegister();
5890        Result value = cgen_->allocator()->Allocate();
5891        ASSERT(value.is_valid());
5892        // Cannot use r12 for the receiver: encoding r12 as the r/m base
5893        // in a ModR/M byte requires an extra SIB byte, which would change
5894        // the distance between a call and a fixup location.
5895        if (receiver.reg().is(r12)) {
5896          // Swap receiver and value.
5897          __ movq(value.reg(), receiver.reg());
5898          Result temp = receiver;
5899          receiver = value;
5900          value = temp;
5901          cgen_->frame()->Spill(value.reg());  // r12 may have been shared.
5902        }
5903
5904        DeferredReferenceGetNamedValue* deferred =
5905            new DeferredReferenceGetNamedValue(value.reg(),
5906                                               receiver.reg(),
5907                                               GetName());
5908
5909        // Check that the receiver is a heap object.
5910        __ JumpIfSmi(receiver.reg(), deferred->entry_label());
5911
5912        __ bind(deferred->patch_site());
5913        // This is the map check instruction that will be patched (so we can't
5914        // use the double underscore macro that may insert instructions).
5915        // Initially use an invalid map to force a failure.
5916        masm->Move(kScratchRegister, Factory::null_value());
5917        masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5918                   kScratchRegister);
5919        // This branch is always a forward branch, so it has a fixed size,
5920        // which allows the assert below to succeed and patching to work.
5921        // Don't use deferred->Branch(...), since that might add coverage code.
5922        masm->j(not_equal, deferred->entry_label());
5923
5924        // The delta from the patch label to the load offset must be
5925        // statically known.
5926        ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
5927               LoadIC::kOffsetToLoadInstruction);
5928        // The initial (invalid) offset has to be large enough to force
5929        // a 32-bit instruction encoding to allow patching with an
5930        // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
5931        int offset = kMaxInt;
5932        masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
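        // (A sketch of the patching protocol assumed here: when the inline
        //  cache finds the real map and property offset, it rewrites both
        //  the embedded map above and this 32-bit displacement in place, so
        //  the sequence becomes a genuine map check plus inobject load.)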
5933
5934        __ IncrementCounter(&Counters::named_load_inline, 1);
5935        deferred->BindExit();
5936        cgen_->frame()->Push(&receiver);
5937        cgen_->frame()->Push(&value);
5938      }
5939      break;
5940    }
5941
5942    case KEYED: {
5943      Comment cmnt(masm, "[ Load from keyed Property");
5944      Variable* var = expression_->AsVariableProxy()->AsVariable();
5945      bool is_global = var != NULL;
5946      ASSERT(!is_global || var->is_global());
5947
5948      Result value = cgen_->EmitKeyedLoad(is_global);
5949      cgen_->frame()->Push(&value);
5950      break;
5951    }
5952
5953    default:
5954      UNREACHABLE();
5955  }
5956
5957  if (!persist_after_get_) {
5958    cgen_->UnloadReference(this);
5959  }
5960}
5961
5962
5963void Reference::TakeValue() {
5964  // TODO(X64): This function is completely architecture independent. Move
5965  // it somewhere shared.
5966
5967  // For non-constant frame-allocated slots, we invalidate the value in the
5968  // slot.  For all others, we fall back on GetValue.
5969  ASSERT(!cgen_->in_spilled_code());
5970  ASSERT(!is_illegal());
5971  if (type_ != SLOT) {
5972    GetValue();
5973    return;
5974  }
5975
5976  Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
5977  ASSERT(slot != NULL);
5978  if (slot->type() == Slot::LOOKUP ||
5979      slot->type() == Slot::CONTEXT ||
5980      slot->var()->mode() == Variable::CONST ||
5981      slot->is_arguments()) {
5982    GetValue();
5983    return;
5984  }
5985
5986  // Only non-constant, frame-allocated parameters and locals can reach
5987  // here.  Be careful not to use the optimizations for arguments
5988  // object access since it may not have been initialized yet.
5989  ASSERT(!slot->is_arguments());
5990  if (slot->type() == Slot::PARAMETER) {
5991    cgen_->frame()->TakeParameterAt(slot->index());
5992  } else {
5993    ASSERT(slot->type() == Slot::LOCAL);
5994    cgen_->frame()->TakeLocalAt(slot->index());
5995  }
5996
5997  ASSERT(persist_after_get_);
5998  // Do not unload the reference, because it is used in SetValue.
5999}
6000
6001
6002void Reference::SetValue(InitState init_state) {
6003  ASSERT(cgen_->HasValidEntryRegisters());
6004  ASSERT(!is_illegal());
6005  MacroAssembler* masm = cgen_->masm();
6006  switch (type_) {
6007    case SLOT: {
6008      Comment cmnt(masm, "[ Store to Slot");
6009      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
6010      ASSERT(slot != NULL);
6011      cgen_->StoreToSlot(slot, init_state);
6012      break;
6013    }
6014
6015    case NAMED: {
6016      Comment cmnt(masm, "[ Store to named Property");
6017      cgen_->frame()->Push(GetName());
6018      Result answer = cgen_->frame()->CallStoreIC();
6019      cgen_->frame()->Push(&answer);
6020      break;
6021    }
6022
6023    case KEYED: {
6024      Comment cmnt(masm, "[ Store to keyed Property");
6025
6026      // Generate an inlined version of the keyed store if the code is in
6027      // a loop and the key is likely to be a smi.
6028      Property* property = expression()->AsProperty();
6029      ASSERT(property != NULL);
6030      StaticType* key_smi_analysis = property->key()->type();
6031
6032      if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
6033        Comment cmnt(masm, "[ Inlined store to keyed Property");
6034
6035        // Get the receiver, key and value into registers.
6036        Result value = cgen_->frame()->Pop();
6037        Result key = cgen_->frame()->Pop();
6038        Result receiver = cgen_->frame()->Pop();
6039
6040        Result tmp = cgen_->allocator_->Allocate();
6041        ASSERT(tmp.is_valid());
6042
6043        // Determine whether the value is a constant before putting it
6044        // in a register.
6045        bool value_is_constant = value.is_constant();
6046
6047        // Make sure that value, key and receiver are in registers.
6048        value.ToRegister();
6049        key.ToRegister();
6050        receiver.ToRegister();
6051
6052        DeferredReferenceSetKeyedValue* deferred =
6053            new DeferredReferenceSetKeyedValue(value.reg(),
6054                                               key.reg(),
6055                                               receiver.reg());
6056
6057        // Check that the value is a smi if it is not a constant.
6058        // We can skip the write barrier for smis and constants.
6059        if (!value_is_constant) {
6060          __ JumpIfNotSmi(value.reg(), deferred->entry_label());
6061        }
6062
6063        // Check that the key is a non-negative smi.
6064        __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
6065
6066        // Check that the receiver is not a smi.
6067        __ JumpIfSmi(receiver.reg(), deferred->entry_label());
6068
6069        // Check that the receiver is a JSArray.
6070        __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
6071        deferred->Branch(not_equal);
6072
6073        // Check that the key is within bounds.  Both the key and the
6074        // length of the JSArray are smis.
6075        __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
6076                      key.reg());
6077        deferred->Branch(less_equal);
6078
6079        // Get the elements array from the receiver and check that it
6080        // is a flat array (not a dictionary).
6081        __ movq(tmp.reg(),
6082                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6083        // Bind the deferred code patch site to be able to locate the
6084        // fixed array map comparison.  When debugging, we patch this
6085        // comparison to always fail so that we will hit the IC call
6086        // in the deferred code, which allows the debugger to
6087        // break for fast-case stores.
6088        __ bind(deferred->patch_site());
6089        // Avoid using __ to ensure the distance from patch_site
6090        // to the map address is always the same.
6091        masm->movq(kScratchRegister, Factory::fixed_array_map(),
6092                   RelocInfo::EMBEDDED_OBJECT);
6093        __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
6094                kScratchRegister);
6095        deferred->Branch(not_equal);
6096
6097        // Store the value.
6098        SmiIndex index =
6099            masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
6100        __ movq(Operand(tmp.reg(),
6101                        index.reg,
6102                        index.scale,
6103                        FixedArray::kHeaderSize - kHeapObjectTag),
6104                value.reg());
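        // (A sketch of the index computation: SmiToIndex called with
        //  kPointerSizeLog2 yields a register/scale pair whose product is
        //  key * kPointerSize, so for key == smi(3) the operand above
        //  addresses elements + 3 * 8 + FixedArray::kHeaderSize
        //  - kHeapObjectTag on x64.)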
6105        __ IncrementCounter(&Counters::keyed_store_inline, 1);
6106
6107        deferred->BindExit();
6108
6109        cgen_->frame()->Push(&receiver);
6110        cgen_->frame()->Push(&key);
6111        cgen_->frame()->Push(&value);
6112      } else {
6113        Result answer = cgen_->frame()->CallKeyedStoreIC();
6114        // Make sure that we do not have a test instruction after the
6115        // call.  A test instruction after the call is used to
6116        // indicate that we have generated an inline version of the
6117        // keyed store.
6118        masm->nop();
6119        cgen_->frame()->Push(&answer);
6120      }
6121      break;
6122    }
6123
6124    default:
6125      UNREACHABLE();
6126  }
6127  cgen_->UnloadReference(this);
6128}
6129
6130
6131void FastNewClosureStub::Generate(MacroAssembler* masm) {
6132  // Clone the boilerplate in new space.  Set its context to the
6133  // current context, which is held in rsi.
6134  Label gc;
6135  __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
6136
6137  // Get the boilerplate function from the stack.
6138  __ movq(rdx, Operand(rsp, 1 * kPointerSize));
6139
6140  // Compute the function map in the current global context and set that
6141  // as the map of the allocated object.
6142  __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
6143  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
6144  __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
6145  __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
6146
6147  // Clone the rest of the boilerplate fields. We don't have to update
6148  // the write barrier because the allocated object is in new space.
6149  for (int offset = kPointerSize;
6150       offset < JSFunction::kSize;
6151       offset += kPointerSize) {
6152    if (offset == JSFunction::kContextOffset) {
6153      __ movq(FieldOperand(rax, offset), rsi);
6154    } else {
6155      __ movq(rbx, FieldOperand(rdx, offset));
6156      __ movq(FieldOperand(rax, offset), rbx);
6157    }
6158  }
6159
6160  // Return and remove the on-stack parameter.
6161  __ ret(1 * kPointerSize);
6162
6163  // Create a new closure through the slower runtime call.
6164  __ bind(&gc);
6165  __ pop(rcx);  // Temporarily remove return address.
6166  __ pop(rdx);
6167  __ push(rsi);
6168  __ push(rdx);
6169  __ push(rcx);  // Restore return address.
6170  __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
6171}
6172
6173
6174void FastNewContextStub::Generate(MacroAssembler* masm) {
6175  // Try to allocate the context in new space.
6176  Label gc;
6177  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
6178  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
6179                        rax, rbx, rcx, &gc, TAG_OBJECT);
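  // (A sketch of the size computation: a context is laid out like a
  //  FixedArray, so the allocation is one pointer per slot, including the
  //  Context::MIN_CONTEXT_SLOTS fixed slots, plus the array header.)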
6180
6181  // Get the function from the stack.
6182  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
6183
6184  // Setup the object header.
6185  __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
6186  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
6187  __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));
6188
6189  // Setup the fixed slots.
6190  __ xor_(rbx, rbx);  // Set to NULL.
6191  __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
6192  __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
6193  __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
6194  __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
6195
6196  // Copy the global object from the surrounding context.
6197  __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
6198  __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
6199
6200  // Initialize the rest of the slots to undefined.
6201  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
6202  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
6203    __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
6204  }
6205
6206  // Return and remove the on-stack parameter.
6207  __ movq(rsi, rax);
6208  __ ret(1 * kPointerSize);
6209
6210  // Need to collect. Call into runtime system.
6211  __ bind(&gc);
6212  __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
6213}
6214
6215
6216void ToBooleanStub::Generate(MacroAssembler* masm) {
6217  Label false_result, true_result, not_string;
6218  __ movq(rax, Operand(rsp, 1 * kPointerSize));
6219
6220  // 'null' => false.
6221  __ CompareRoot(rax, Heap::kNullValueRootIndex);
6222  __ j(equal, &false_result);
6223
6224  // Get the map and type of the heap object.
6225  // We don't use CmpObjectType because we manipulate the type field.
6226  __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6227  __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
6228
6229  // Undetectable => false.
6230  __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
6231  __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
6232  __ j(not_zero, &false_result);
6233
6234  // JavaScript object => true.
6235  __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
6236  __ j(above_equal, &true_result);
6237
6238  // String value => false iff empty.
6239  __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
6240  __ j(above_equal, &not_string);
6241  __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
6242  __ testl(rdx, rdx);
6243  __ j(zero, &false_result);
6244  __ jmp(&true_result);
6245
6246  __ bind(&not_string);
6247  // HeapNumber => false iff +0, -0, or NaN.
6248  // These three cases set C3 when compared to zero in the FPU.
6249  __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
6250  __ j(not_equal, &true_result);
6251  __ fldz();  // Load zero onto the fp stack.
6252  // Load the heap-number double value onto the fp stack.
6253  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
6254  __ FCmp();
6255  __ j(zero, &false_result);
6256  // Fall through to |true_result|.
6257
6258  // Return 1/0 for true/false in rax.
6259  __ bind(&true_result);
6260  __ movq(rax, Immediate(1));
6261  __ ret(1 * kPointerSize);
6262  __ bind(&false_result);
6263  __ xor_(rax, rax);
6264  __ ret(1 * kPointerSize);
6265}
6266
6267
6268bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
6269  Object* answer_object = Heap::undefined_value();
6270  switch (op) {
6271    case Token::ADD:
6272      // Use intptr_t to detect overflow of 32-bit int.
6273      if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
6274        answer_object = Smi::FromInt(left + right);
6275      }
6276      break;
6277    case Token::SUB:
6278      // Use intptr_t to detect overflow of 32-bit int.
6279      if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
6280        answer_object = Smi::FromInt(left - right);
6281      }
6282      break;
6283    case Token::MUL: {
6284        double answer = static_cast<double>(left) * right;
6285        if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
6286          // If the product is zero and the non-zero factor is negative,
6287          // the spec requires us to return floating point negative zero.
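          // (E.g. 0 * -5: the product is 0 and left + right == -5 < 0,
          //  so we fall through and let the generic code produce -0.0.)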
6288          if (answer != 0 || (left + right) >= 0) {
6289            answer_object = Smi::FromInt(static_cast<int>(answer));
6290          }
6291        }
6292      }
6293      break;
6294    case Token::DIV:
6295    case Token::MOD:
6296      break;
6297    case Token::BIT_OR:
6298      answer_object = Smi::FromInt(left | right);
6299      break;
6300    case Token::BIT_AND:
6301      answer_object = Smi::FromInt(left & right);
6302      break;
6303    case Token::BIT_XOR:
6304      answer_object = Smi::FromInt(left ^ right);
6305      break;
6306
6307    case Token::SHL: {
6308        int shift_amount = right & 0x1F;
6309        if (Smi::IsValid(left << shift_amount)) {
6310          answer_object = Smi::FromInt(left << shift_amount);
6311        }
6312        break;
6313      }
6314    case Token::SHR: {
6315        int shift_amount = right & 0x1F;
6316        unsigned int unsigned_left = left;
6317        unsigned_left >>= shift_amount;
6318        if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
6319          answer_object = Smi::FromInt(unsigned_left);
6320        }
6321        break;
6322      }
6323    case Token::SAR: {
6324        int shift_amount = right & 0x1F;
6325        unsigned int unsigned_left = left;
6326        if (left < 0) {
6327          // Perform arithmetic shift of a negative number by
6328          // complementing number, logical shifting, complementing again.
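          // (E.g. left == -5, shift_amount == 1: ~(-5) == 4, 4 >> 1 == 2,
          //  and ~2 == -3, which matches -5 >> 1 under arithmetic-shift
          //  semantics, i.e. rounding toward negative infinity.)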
6329          unsigned_left = ~unsigned_left;
6330          unsigned_left >>= shift_amount;
6331          unsigned_left = ~unsigned_left;
6332        } else {
6333          unsigned_left >>= shift_amount;
6334        }
6335        ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
6336        answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
6337        break;
6338      }
6339    default:
6340      UNREACHABLE();
6341      break;
6342  }
6343  if (answer_object == Heap::undefined_value()) {
6344    return false;
6345  }
6346  frame_->Push(Handle<Object>(answer_object));
6347  return true;
6348}
6349
6350
6351// End of CodeGenerator implementation.
6352
6353// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
6354// is faster than using the built-in instructions on floating point registers.
6355// Trashes rdi and rbx.  The result is left in rcx.  The source register
6356// cannot be rcx or one of the trashed registers.
6357void IntegerConvert(MacroAssembler* masm,
6358                    Register source,
6359                    bool use_sse3,
6360                    Label* conversion_failure) {
6361  ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx));
6362  Label done, right_exponent, normal_exponent;
6363  Register scratch = rbx;
6364  Register scratch2 = rdi;
6365  // Get exponent word.
6366  __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
6367  // Get exponent alone in scratch2.
6368  __ movl(scratch2, scratch);
6369  __ and_(scratch2, Immediate(HeapNumber::kExponentMask));
6370  if (use_sse3) {
6371    CpuFeatures::Scope scope(SSE3);
6372    // Check whether the exponent is too big for a 64 bit signed integer.
6373    static const uint32_t kTooBigExponent =
6374        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
6375    __ cmpl(scratch2, Immediate(kTooBigExponent));
6376    __ j(greater_equal, conversion_failure);
6377    // Load x87 register with heap number.
6378    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
6379    // Reserve space for 64 bit answer.
6380    __ subq(rsp, Immediate(sizeof(uint64_t)));  // Nolint.
6381    // Do conversion, which cannot fail because we checked the exponent.
6382    __ fisttp_d(Operand(rsp, 0));
6383    __ movl(rcx, Operand(rsp, 0));  // Load low word of answer into rcx.
6384    __ addq(rsp, Immediate(sizeof(uint64_t)));  // Nolint.
6385  } else {
6386    // Load rcx with zero.  We use this either for the final shift or
6387    // for the answer.
6388    __ xor_(rcx, rcx);
6389    // Check whether the exponent matches a 32 bit signed int that cannot be
6390    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
6391    // exponent is 30 (biased).  This is the exponent that we are fastest at and
6392    // also the highest exponent we can handle here.
6393    const uint32_t non_smi_exponent =
6394        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
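    // (A sketch of the encoding assumed here: a double 1.m * 2^e stores
    //  e + kExponentBias in its exponent field, shifted left by
    //  kExponentShift within the high 32-bit word, so e == 30 encodes as
    //  (kExponentBias + 30) << kExponentShift.)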
6395    __ cmpl(scratch2, Immediate(non_smi_exponent));
6396    // If we have a match of the int32-but-not-Smi exponent then skip some
6397    // logic.
6398    __ j(equal, &right_exponent);
6399    // If the exponent is higher than that then go to slow case.  This catches
6400    // numbers that don't fit in a signed int32, infinities and NaNs.
6401    __ j(less, &normal_exponent);
6402
6403    {
6404      // Handle a big exponent.  The only reason we have this code is that the
6405      // >>> operator has a tendency to generate numbers with an exponent of 31.
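      // (E.g. (-1) >>> 0 evaluates to 4294967295, which is just below 2^32
      //  and therefore carries a biased exponent of kExponentBias + 31.)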
6406      const uint32_t big_non_smi_exponent =
6407          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
6408      __ cmpl(scratch2, Immediate(big_non_smi_exponent));
6409      __ j(not_equal, conversion_failure);
6410      // We have the big exponent, typically from >>>.  This means the number is
6411      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
6412      __ movl(scratch2, scratch);
6413      __ and_(scratch2, Immediate(HeapNumber::kMantissaMask));
6414      // Put back the implicit 1.
6415      __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift));
6416      // Shift up the mantissa bits to take up the space the exponent used to
6417      // take.  We just OR'ed in the implicit bit, which took care of one,
6418      // and we want to use the full unsigned range, so we subtract 1 bit
6419      // from the shift distance.
6420      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
6421      __ shl(scratch2, Immediate(big_shift_distance));
6422      // Get the second half of the double.
6423      __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset));
6424      // Shift down 21 bits to get the most significant 11 bits of the
6425      // low mantissa word.
6426      __ shr(rcx, Immediate(32 - big_shift_distance));
6427      __ or_(rcx, scratch2);
6428      // We have the answer in rcx, but we may need to negate it.
6429      __ testl(scratch, scratch);
6430      __ j(positive, &done);
6431      __ neg(rcx);
6432      __ jmp(&done);
6433    }
6434
6435    __ bind(&normal_exponent);
6436    // Exponent word in scratch, exponent part of exponent word in scratch2.
6437    // Zero in rcx.
6438    // We know the exponent is smaller than 30 (biased).  If it is less than
6439    // 0 (biased), then the number is smaller in magnitude than 1.0 * 2^0,
6440    // i.e. it rounds to zero.
6441    const uint32_t zero_exponent =
6442        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
6443    __ subl(scratch2, Immediate(zero_exponent));
6444    // rcx already has a Smi zero.
6445    __ j(less, &done);
6446
6447    // We have a shifted exponent between 0 and 30 in scratch2.
6448    __ shr(scratch2, Immediate(HeapNumber::kExponentShift));
6449    __ movl(rcx, Immediate(30));
6450    __ subl(rcx, scratch2);
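    // (A sketch: for the double 8.0 == 1.0 * 2^3 the unbiased exponent is
    //  3, so rcx becomes 30 - 3 == 27, the distance the top-aligned
    //  mantissa is shifted down again below.)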
6451
6452    __ bind(&right_exponent);
6453    // Here rcx is the shift, scratch is the exponent word.
6454    // Get the top bits of the mantissa.
6455    __ and_(scratch, Immediate(HeapNumber::kMantissaMask));
6456    // Put back the implicit 1.
6457    __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift));
6458    // Shift up the mantissa bits to take up the space the exponent used to
6459    // take.  We have kExponentShift + 1 significant bits in the low end of the
6460    // word.  Shift them to the top bits.
6461    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
6462    __ shl(scratch, Immediate(shift_distance));
6463    // Get the second half of the double. For some exponents we don't
6464    // actually need this because the bits get shifted out again, but
6465    // it's probably slower to test than just to do it.
6466    __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
6467    // Shift down 22 bits to get the most significant 10 bits of the
6468    // low mantissa word.
6469    __ shr(scratch2, Immediate(32 - shift_distance));
6470    __ or_(scratch2, scratch);
6471    // Move down according to the exponent.
6472    __ shr_cl(scratch2);
6473    // Now the unsigned answer is in scratch2.  We need to move it to rcx and
6474    // we may need to fix the sign.
6475    Label negative;
6476    __ xor_(rcx, rcx);
6477    __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset));
6478    __ j(greater, &negative);
6479    __ movl(rcx, scratch2);
6480    __ jmp(&done);
6481    __ bind(&negative);
6482    __ subl(rcx, scratch2);
6483    __ bind(&done);
6484  }
6485}
6486
6487
6488void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
6489  Label slow, done;
6490
6491  if (op_ == Token::SUB) {
6492    // Check whether the value is a smi.
6493    Label try_float;
6494    __ JumpIfNotSmi(rax, &try_float);
6495
6496    // Enter the runtime system if the value of the smi is zero
6497    // to make sure that we switch between 0 and -0.
6498    // Also enter it if the value of the smi is Smi::kMinValue.
6499    __ SmiNeg(rax, rax, &done);
6500
6501    // Either zero or Smi::kMinValue, neither of which becomes a smi when
6502    // negated.
6503    __ SmiCompare(rax, Smi::FromInt(0));
6504    __ j(not_equal, &slow);
6505    __ Move(rax, Factory::minus_zero_value());
6506    __ jmp(&done);
6507
6508    // Try floating point case.
6509    __ bind(&try_float);
6510    __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6511    __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
6512    __ j(not_equal, &slow);
6513    // Operand is a heap number; negate its value by flipping the sign bit.
6514    __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
6515    __ movq(kScratchRegister, Immediate(0x01));
6516    __ shl(kScratchRegister, Immediate(63));
6517    __ xor_(rdx, kScratchRegister);  // Flip sign.
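    // (A sketch of the bit flip: IEEE 754 doubles keep their sign in bit
    //  63, so 0x4000000000000000 (2.0) XOR 0x8000000000000000 yields
    //  0xC000000000000000 (-2.0).)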
6518    // rdx is value to store.
6519    if (overwrite_) {
6520      __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
6521    } else {
6522      __ AllocateHeapNumber(rcx, rbx, &slow);
6523      // rcx: allocated 'empty' number
6524      __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
6525      __ movq(rax, rcx);
6526    }
6527  } else if (op_ == Token::BIT_NOT) {
6528    // Check if the operand is a heap number.
6529    __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
6530    __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
6531    __ j(not_equal, &slow);
6532
6533    // Convert the heap number in rax to an untagged integer in rcx.
6534    IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow);
6535
6536    // Do the bitwise operation.  The result is a 32-bit integer, which
6537    // always fits in a smi on x64, so no overflow check is needed.
6538    __ not_(rcx);
6539    // Tag the result as a smi and we're done.
6540    ASSERT(kSmiTagSize == 1);
6541    __ Integer32ToSmi(rax, rcx);
6542  }
6543
6544  // Return from the stub.
6545  __ bind(&done);
6546  __ StubReturn(1);
6547
6548  // Handle the slow case by jumping to the JavaScript builtin.
6549  __ bind(&slow);
6550  __ pop(rcx);  // pop return address
6551  __ push(rax);
6552  __ push(rcx);  // push return address
6553  switch (op_) {
6554    case Token::SUB:
6555      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
6556      break;
6557    case Token::BIT_NOT:
6558      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
6559      break;
6560    default:
6561      UNREACHABLE();
6562  }
6563}
6564
6565
6566void CompareStub::Generate(MacroAssembler* masm) {
6567  Label call_builtin, done;
6568
6569  // NOTICE! This code is only reached after a smi-fast-case check, so
6570  // it is certain that at least one operand isn't a smi.
6571
6572  if (cc_ == equal) {  // Both strict and non-strict.
6573    Label slow;  // Fallthrough label.
6574    // Equality is almost reflexive (everything but NaN), so start by testing
6575    // for "identity and not NaN".
6576    {
6577      Label not_identical;
6578      __ cmpq(rax, rdx);
6579      __ j(not_equal, &not_identical);
6580      // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
6581      // so we do the second-best thing: test it ourselves.
6582
6583      if (never_nan_nan_) {
6584        __ xor_(rax, rax);
6585        __ ret(0);
6586      } else {
6587        Label return_equal;
6588        Label heap_number;
6589        // If it's not a heap number, then return equal.
6590        __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
6591               Factory::heap_number_map());
6592        __ j(equal, &heap_number);
6593        __ bind(&return_equal);
6594        __ xor_(rax, rax);
6595        __ ret(0);
6596
6597        __ bind(&heap_number);
6598        // It is a heap number, so return non-equal if it's NaN and equal if
6599        // it's not NaN.
6600        // The representation of NaN values has all exponent bits (52..62) set,
6601        // and not all mantissa bits (0..51) clear.
6602        // We only allow QNaNs, which have bit 51 set (which also rules out
6603        // the value being Infinity).
6604
6605        // The value is a QNaN if all bits of kQuietNaNHighBitsMask are set
6606        // in the word that contains the exponent and the high bit of the
6607        // mantissa, so that is the only word we need to check.
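        // (A sketch of the check below: shifting the high word left by one
        //  discards the sign bit, and the unsigned comparison against
        //  kQuietNaNHighBitsMask << 1 then succeeds exactly when all
        //  exponent bits and the top mantissa bit are set.)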
6608        ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
6609        __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
6610        __ xorl(rax, rax);
6611        __ addl(rdx, rdx);  // Shift value and mask so mask applies to top bits.
6612        __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
6613        __ setcc(above_equal, rax);
6614        __ ret(0);
6615      }
6616
6617      __ bind(&not_identical);
6618    }
6619
6620    // If we're doing a strict equality comparison, we don't have to do
6621    // type conversion, so we generate code to do fast comparison for objects
6622    // and oddballs. Non-smi numbers and strings still go through the usual
6623    // slow-case code.
6624    if (strict_) {
6625      // If either is a Smi (we know that not both are), then they can only
6626      // be equal if the other is a HeapNumber. If so, use the slow case.
6627      {
6628        Label not_smis;
6629        __ SelectNonSmi(rbx, rax, rdx, &not_smis);
6630
6631        // Check if the non-smi operand is a heap number.
6632        __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
6633               Factory::heap_number_map());
6634        // If heap number, handle it in the slow case.
6635        __ j(equal, &slow);
6636        // Return non-equal.  ebx (the lower half of rbx) is not zero.
6637        __ movq(rax, rbx);
6638        __ ret(0);
6639
6640        __ bind(&not_smis);
6641      }
6642
6643      // If either operand is a JSObject or an oddball value, then they are not
6644      // equal since their pointers are different.
6645      // There is no test for undetectability in strict equality.
6646
6647      // If the first object is a JS object, we have done pointer comparison.
6648      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
6649      Label first_non_object;
6650      __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
6651      __ j(below, &first_non_object);
6652      // Return non-zero (eax, the lower half of rax, is not zero).
6653      Label return_not_equal;
6654      ASSERT(kHeapObjectTag != 0);
6655      __ bind(&return_not_equal);
6656      __ ret(0);
6657
6658      __ bind(&first_non_object);
6659      // Check for oddballs: true, false, null, undefined.
6660      __ CmpInstanceType(rcx, ODDBALL_TYPE);
6661      __ j(equal, &return_not_equal);
6662
6663      __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
6664      __ j(above_equal, &return_not_equal);
6665
6666      // Check for oddballs: true, false, null, undefined.
6667      __ CmpInstanceType(rcx, ODDBALL_TYPE);
6668      __ j(equal, &return_not_equal);
6669
6670      // Fall through to the general case.
6671    }
6672    __ bind(&slow);
6673  }
6674
6675  // Push arguments below the return address to prepare jump to builtin.
6676  __ pop(rcx);
6677  __ push(rax);
6678  __ push(rdx);
6679  __ push(rcx);
6680
6681  // Inlined floating point compare.
6682  // Call builtin if operands are not floating point or smi.
6683  Label check_for_symbols;
6684  // Push arguments on stack, for helper functions.
6685  FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols);
6686  FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
6687  __ FCmp();
6688
6689  // Jump to builtin for NaN.
6690  __ j(parity_even, &call_builtin);
6691
6692  // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
6693  Label below_lbl, above_lbl;
6694  // use rdx, rax to convert unsigned to signed comparison
6695  __ j(below, &below_lbl);
6696  __ j(above, &above_lbl);
6697
6698  __ xor_(rax, rax);  // equal
6699  __ ret(2 * kPointerSize);
6700
6701  __ bind(&below_lbl);
6702  __ movq(rax, Immediate(-1));
6703  __ ret(2 * kPointerSize);
6704
6705  __ bind(&above_lbl);
6706  __ movq(rax, Immediate(1));
6707  __ ret(2 * kPointerSize);  // rax, rdx were pushed
6708
6709  // Fast negative check for symbol-to-symbol equality.
6710  __ bind(&check_for_symbols);
6711  Label check_for_strings;
6712  if (cc_ == equal) {
6713    BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
6714    BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
6715
6716    // We've already checked for object identity, so if both operands
6717    // are symbols they aren't equal. Register eax (not rax) already holds a
6718    // non-zero value, which indicates not equal, so just return.
6719    __ ret(2 * kPointerSize);
6720  }
6721
6722  __ bind(&check_for_strings);
6723
6724  __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &call_builtin);
6725
6726  // Inline comparison of ascii strings.
6727  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
6728                                                     rdx,
6729                                                     rax,
6730                                                     rcx,
6731                                                     rbx,
6732                                                     rdi,
6733                                                     r8);
6734
6735#ifdef DEBUG
6736  __ Abort("Unexpected fall-through from string comparison");
6737#endif
6738
6739  __ bind(&call_builtin);
6740  // Must swap the argument order.
6741  __ pop(rcx);
6742  __ pop(rdx);
6743  __ pop(rax);
6744  __ push(rdx);
6745  __ push(rax);
6746
6747  // Figure out which native to call and setup the arguments.
6748  Builtins::JavaScript builtin;
6749  if (cc_ == equal) {
6750    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
6751  } else {
6752    builtin = Builtins::COMPARE;
6753    int ncr;  // NaN compare result
6754    if (cc_ == less || cc_ == less_equal) {
6755      ncr = GREATER;
6756    } else {
6757      ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
6758      ncr = LESS;
6759    }
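    // (E.g. for 'a < b' with a NaN operand the comparison must evaluate
    //  to false, so the builtin is told to report GREATER whenever it
    //  sees NaN.)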
6760    __ Push(Smi::FromInt(ncr));
6761  }
6762
6763  // Restore return address on the stack.
6764  __ push(rcx);
6765
6766  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
6767  // tagged as a small integer.
6768  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
6769}
6770
6771
6772void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
6773                                    Label* label,
6774                                    Register object,
6775                                    Register scratch) {
6776  __ JumpIfSmi(object, label);
6777  __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
6778  __ movzxbq(scratch,
6779             FieldOperand(scratch, Map::kInstanceTypeOffset));
6780  // Ensure that no non-strings have the symbol bit set.
6781  ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
6782  ASSERT(kSymbolTag != 0);
6783  __ testb(scratch, Immediate(kIsSymbolMask));
6784  __ j(zero, label);
6785}
6786
6787
6788// Call the function just below TOS on the stack with the given
6789// arguments. The receiver is the TOS.
6790void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
6791                                      CallFunctionFlags flags,
6792                                      int position) {
6793  // Push the arguments ("left-to-right") on the stack.
6794  int arg_count = args->length();
6795  for (int i = 0; i < arg_count; i++) {
6796    Load(args->at(i));
6797  }
6798
6799  // Record the position for debugging purposes.
6800  CodeForSourcePosition(position);
6801
6802  // Use the shared code stub to call the function.
6803  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
6804  CallFunctionStub call_function(arg_count, in_loop, flags);
6805  Result answer = frame_->CallStub(&call_function, arg_count + 1);
6806  // Restore context and replace function on the stack with the
6807  // result of the stub invocation.
6808  frame_->RestoreContextRegister();
6809  frame_->SetElementAt(0, &answer);
6810}
6811
6812
6813void InstanceofStub::Generate(MacroAssembler* masm) {
6814  // Implements "value instanceof function" operator.
6815  // Expected input state:
6816  //   rsp[0] : return address
6817  //   rsp[1] : function pointer
6818  //   rsp[2] : value
6819
6820  // Get the object - go slow case if it's a smi.
6821  Label slow;
6822  __ movq(rax, Operand(rsp, 2 * kPointerSize));
6823  __ JumpIfSmi(rax, &slow);
6824
6825  // Check that the left-hand side is a JS object.  Leave its map in rax.
6826  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
6827  __ j(below, &slow);
6828  __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
6829  __ j(above, &slow);
6830
6831  // Get the prototype of the function.
6832  __ movq(rdx, Operand(rsp, 1 * kPointerSize));
6833  __ TryGetFunctionPrototype(rdx, rbx, &slow);
6834
6835  // Check that the function prototype is a JS object.
6836  __ JumpIfSmi(rbx, &slow);
6837  __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
6838  __ j(below, &slow);
6839  __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
6840  __ j(above, &slow);
6841
6842  // Register mapping: rax is object map and rbx is function prototype.
6843  __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
6844
6845  // Loop through the prototype chain looking for the function prototype.
6846  Label loop, is_instance, is_not_instance;
6847  __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
6848  __ bind(&loop);
6849  __ cmpq(rcx, rbx);
6850  __ j(equal, &is_instance);
6851  __ cmpq(rcx, kScratchRegister);
6852  __ j(equal, &is_not_instance);
6853  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
6854  __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
6855  __ jmp(&loop);
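  // (A sketch of the walk above: rcx follows map->prototype links until it
  //  reaches either the function prototype in rbx, meaning the value is an
  //  instance, or null, the end of every prototype chain.)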
6856
6857  __ bind(&is_instance);
6858  __ xorl(rax, rax);
6859  __ ret(2 * kPointerSize);
6860
6861  __ bind(&is_not_instance);
6862  __ movl(rax, Immediate(1));
6863  __ ret(2 * kPointerSize);
6864
6865  // Slow-case: Go through the JavaScript implementation.
6866  __ bind(&slow);
6867  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
6868}
6869
6870
6871void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
6872  // The displacement is used for skipping the return address and the
6873  // frame pointer on the stack. It is the offset of the last
6874  // parameter (if any) relative to the frame pointer.
6875  static const int kDisplacement = 2 * kPointerSize;
6876
6877  // Check if the calling frame is an arguments adaptor frame.
6878  Label runtime;
6879  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6880  __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
6881                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
6882  __ j(not_equal, &runtime);
6883
6884  // Patch the arguments.length and the parameters pointer.  The
6885  // length loaded into rcx below is a smi.
6886  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6887  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
6888  SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
6889  __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
6890  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
6891
6892  // Do the runtime call to allocate the arguments object.
6893  __ bind(&runtime);
6894  Runtime::Function* f = Runtime::FunctionForId(Runtime::kNewArgumentsFast);
6895  __ TailCallRuntime(ExternalReference(f), 3, f->result_size);
6896}
6897
6898
6899void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
6900  // The key is in rdx and the parameter count is in rax.
6901
6902  // The displacement is used for skipping the frame pointer on the
6903  // stack. It is the offset of the last parameter (if any) relative
6904  // to the frame pointer.
6905  static const int kDisplacement = 1 * kPointerSize;
6906
6907  // Check that the key is a smi.
6908  Label slow;
6909  __ JumpIfNotSmi(rdx, &slow);
6910
6911  // Check if the calling frame is an arguments adaptor frame.
6912  Label adaptor;
6913  __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6914  __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
6915                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
6916  __ j(equal, &adaptor);
6917
6918  // Check the index against the formal parameter count limit passed in
6919  // through register rax. Use unsigned comparison to get negative
6920  // check for free.
6921  __ cmpq(rdx, rax);
6922  __ j(above_equal, &slow);
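  // (A sketch of the trick: a negative smi has its sign bit set, so its
  //  unsigned interpretation is larger than any valid parameter count and
  //  the single above_equal branch rejects it as well.)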
6923
6924  // Read the argument from the stack and return it.
6925  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
6926  __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
6927  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
6928  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
6929  __ Ret();
6930
6931  // Arguments adaptor case: Check index against actual arguments
6932  // limit found in the arguments adaptor frame. Use unsigned
6933  // comparison to get negative check for free.
6934  __ bind(&adaptor);
6935  __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6936  __ cmpq(rdx, rcx);
6937  __ j(above_equal, &slow);
6938
6939  // Read the argument from the stack and return it.
6940  index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
6941  __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
6942  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
6943  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
6944  __ Ret();
6945
6946  // Slow-case: Handle non-smi or out-of-bounds access to arguments
6947  // by calling the runtime system.
6948  __ bind(&slow);
6949  __ pop(rbx);  // Return address.
6950  __ push(rdx);
6951  __ push(rbx);
6952  Runtime::Function* f =
6953      Runtime::FunctionForId(Runtime::kGetArgumentsProperty);
6954  __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
6955}
6956
6957
6958void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
6959  // Check if the calling frame is an arguments adaptor frame.
6960  Label adaptor;
6961  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6962  __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
6963                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
6964
6965  // Arguments adaptor case: Read the arguments length from the
6966  // adaptor frame and return it.
6967  // Otherwise nothing to do: the number of formal parameters has already
6968  // been passed in register rax by the calling function.  Just return it.
6969  __ cmovq(equal, rax,
6970           Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6971  __ ret(0);
6972}
6973
6974
6975int CEntryStub::MinorKey() {
6976  ASSERT(result_size_ <= 2);
6977#ifdef _WIN64
6978  // Simple results returned in rax (using default code).
6979  // Complex results must be written to address passed as first argument.
6980  // Use even numbers for minor keys, reserving the odd numbers for
6981  // CEntryDebugBreakStub.
6982  return (result_size_ < 2) ? 0 : result_size_ * 2;
6983#else
6984  // Single results returned in rax (both AMD64 and Win64 calling conventions)
6985  // and a struct of two pointers in rax+rdx (AMD64 calling convention only)
6986  // by default.
6987  return 0;
6988#endif
6989}
6990
6991
6992void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
6993  // Check that the stack contains the next handler, the frame pointer,
6994  // the state and the return address, in that order.
6995  ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
6996            StackHandlerConstants::kStateOffset);
6997  ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
6998            StackHandlerConstants::kPCOffset);
6999
7000  ExternalReference handler_address(Top::k_handler_address);
7001  __ movq(kScratchRegister, handler_address);
7002  __ movq(rsp, Operand(kScratchRegister, 0));
7003  // get next in chain
7004  __ pop(rcx);
7005  __ movq(Operand(kScratchRegister, 0), rcx);
7006  __ pop(rbp);  // pop frame pointer
7007  __ pop(rdx);  // remove state
7008
7009  // Before returning, restore the context from the frame pointer if it is
7010  // not NULL.  The frame pointer is NULL in the handler of a JS entry frame.
7011  __ xor_(rsi, rsi);  // tentatively set context pointer to NULL
7012  Label skip;
7013  __ cmpq(rbp, Immediate(0));
7014  __ j(equal, &skip);
7015  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
7016  __ bind(&skip);
7017  __ ret(0);
7018}
7019
7020
7021void CEntryStub::GenerateCore(MacroAssembler* masm,
7022                              Label* throw_normal_exception,
7023                              Label* throw_termination_exception,
7024                              Label* throw_out_of_memory_exception,
7025                              ExitFrame::Mode mode,
7026                              bool do_gc,
7027                              bool always_allocate_scope) {
7028  // rax: result parameter for PerformGC, if any.
7029  // rbx: pointer to C function  (C callee-saved).
7030  // rbp: frame pointer  (restored after C call).
7031  // rsp: stack pointer  (restored after C call).
7032  // r14: number of arguments including receiver (C callee-saved).
7033  // r15: pointer to the first argument (C callee-saved).
7034  //      This pointer is reused in LeaveExitFrame(), so it is stored in a
7035  //      callee-saved register.
7036
7037  if (do_gc) {
7038    // Pass failure code returned from last attempt as first argument to GC.
7039#ifdef _WIN64
7040    __ movq(rcx, rax);
7041#else  // ! defined(_WIN64)
7042    __ movq(rdi, rax);
7043#endif
7044    __ movq(kScratchRegister,
7045            FUNCTION_ADDR(Runtime::PerformGC),
7046            RelocInfo::RUNTIME_ENTRY);
7047    __ call(kScratchRegister);
7048  }
7049
7050  ExternalReference scope_depth =
7051      ExternalReference::heap_always_allocate_scope_depth();
7052  if (always_allocate_scope) {
7053    __ movq(kScratchRegister, scope_depth);
7054    __ incl(Operand(kScratchRegister, 0));
7055  }
7056
7057  // Call C function.
7058#ifdef _WIN64
7059  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
7060  // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
7061  __ movq(Operand(rsp, 4 * kPointerSize), r14);  // argc.
7062  __ movq(Operand(rsp, 5 * kPointerSize), r15);  // argv.
7063  if (result_size_ < 2) {
7064    // Pass a pointer to the Arguments object as the first argument.
7065    // Return result in single register (rax).
7066    __ lea(rcx, Operand(rsp, 4 * kPointerSize));
7067  } else {
7068    ASSERT_EQ(2, result_size_);
7069    // Pass a pointer to the result location as the first argument.
7070    __ lea(rcx, Operand(rsp, 6 * kPointerSize));
7071    // Pass a pointer to the Arguments object as the second argument.
7072    __ lea(rdx, Operand(rsp, 4 * kPointerSize));
7073  }
7074
7075#else  // ! defined(_WIN64)
7076  // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
7077  __ movq(rdi, r14);  // argc.
7078  __ movq(rsi, r15);  // argv.
7079#endif
7080  __ call(rbx);
7081  // Result is in rax - do not destroy this register!
7082
7083  if (always_allocate_scope) {
7084    __ movq(kScratchRegister, scope_depth);
7085    __ decl(Operand(kScratchRegister, 0));
7086  }
7087
7088  // Check for failure result.
7089  Label failure_returned;
7090  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
7091#ifdef _WIN64
7092  // If the return value is on the stack, read it into registers.
7093  if (result_size_ > 1) {
7094    ASSERT_EQ(2, result_size_);
7095    // Read result values stored on stack. Result is stored
7096    // above the four argument mirror slots and the two
7097    // Arguments object slots.
7098    __ movq(rax, Operand(rsp, 6 * kPointerSize));
7099    __ movq(rdx, Operand(rsp, 7 * kPointerSize));
7100  }
7101#endif
7102  __ lea(rcx, Operand(rax, 1));
7103  // Lower 2 bits of rcx are 0 iff rax has failure tag.
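  // (A sketch of the tagging scheme assumed here: failure objects carry
  //  kFailureTag == 3 in their two low bits, while smis and heap pointers
  //  do not, so adding 1 clears exactly those bits only for failures.)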
7104  __ testl(rcx, Immediate(kFailureTagMask));
7105  __ j(zero, &failure_returned);
7106
7107  // Exit the JavaScript to C++ exit frame.
7108  __ LeaveExitFrame(mode, result_size_);
7109  __ ret(0);
7110
7111  // Handling of failure.
7112  __ bind(&failure_returned);
7113
7114  Label retry;
7115  // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
7116  ASSERT(Failure::RETRY_AFTER_GC == 0);
7117  __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
7118  __ j(zero, &retry);
7119
7120  // Special handling of out of memory exceptions.
7121  __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
7122  __ cmpq(rax, kScratchRegister);
7123  __ j(equal, throw_out_of_memory_exception);
7124
7125  // Retrieve the pending exception and clear the variable.
7126  ExternalReference pending_exception_address(Top::k_pending_exception_address);
7127  __ movq(kScratchRegister, pending_exception_address);
7128  __ movq(rax, Operand(kScratchRegister, 0));
7129  __ movq(rdx, ExternalReference::the_hole_value_location());
7130  __ movq(rdx, Operand(rdx, 0));
7131  __ movq(Operand(kScratchRegister, 0), rdx);
7132
7133  // Special handling of termination exceptions which are uncatchable
7134  // by JavaScript code.
7135  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
7136  __ j(equal, throw_termination_exception);
7137
7138  // Handle normal exception.
7139  __ jmp(throw_normal_exception);
7140
7141  // Retry.
7142  __ bind(&retry);
7143}
7144
7145
7146void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
7147                                          UncatchableExceptionType type) {
7148  // Fetch top stack handler.
7149  ExternalReference handler_address(Top::k_handler_address);
7150  __ movq(kScratchRegister, handler_address);
7151  __ movq(rsp, Operand(kScratchRegister, 0));
7152
7153  // Unwind the handlers until the ENTRY handler is found.
7154  Label loop, done;
7155  __ bind(&loop);
7156  // Load the type of the current stack handler.
7157  const int kStateOffset = StackHandlerConstants::kStateOffset;
7158  __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
7159  __ j(equal, &done);
7160  // Fetch the next handler in the list.
7161  const int kNextOffset = StackHandlerConstants::kNextOffset;
7162  __ movq(rsp, Operand(rsp, kNextOffset));
7163  __ jmp(&loop);
7164  __ bind(&done);
7165
7166  // Set the top handler address to the next handler past the ENTRY handler.
7167  __ movq(kScratchRegister, handler_address);
7168  __ pop(Operand(kScratchRegister, 0));
7169
7170  if (type == OUT_OF_MEMORY) {
7171    // Set external caught exception to false.
7172    ExternalReference external_caught(Top::k_external_caught_exception_address);
7173    __ movq(rax, Immediate(false));
7174    __ store_rax(external_caught);
7175
7176    // Set pending exception and rax to out of memory exception.
7177    ExternalReference pending_exception(Top::k_pending_exception_address);
7178    __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
7179    __ store_rax(pending_exception);
7180  }
7181
7182  // Clear the context pointer.
7183  __ xor_(rsi, rsi);
7184
7185  // Restore registers from handler.
7186  ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
7187            StackHandlerConstants::kFPOffset);
7188  __ pop(rbp);  // FP
7189  ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
7190            StackHandlerConstants::kStateOffset);
7191  __ pop(rdx);  // State
7192
7193  ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
7194            StackHandlerConstants::kPCOffset);
7195  __ ret(0);
7196}
7197
7198
7199void CallFunctionStub::Generate(MacroAssembler* masm) {
7200  Label slow;
7201
7202  // If the receiver might be a value (string, number or boolean), check
7203  // for this and box it if it is.
7204  if (ReceiverMightBeValue()) {
7205    // Get the receiver from the stack.
7206    // +1 ~ return address
7207    Label receiver_is_value, receiver_is_js_object;
7208    __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
7209
7210    // Check if receiver is a smi (which is a number value).
7211    __ JumpIfSmi(rax, &receiver_is_value);
7212
7213    // Check if the receiver is a valid JS object.
7214    __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
7215    __ j(above_equal, &receiver_is_js_object);
7216
7217    // Call the runtime to box the value.
7218    __ bind(&receiver_is_value);
7219    __ EnterInternalFrame();
7220    __ push(rax);
7221    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
7222    __ LeaveInternalFrame();
7223    __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
7224
7225    __ bind(&receiver_is_js_object);
7226  }
7227
7228  // Get the function to call from the stack.
7229  // +2 ~ receiver, return address
7230  __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
7231
7232  // Check that the function really is a JavaScript function.
7233  __ JumpIfSmi(rdi, &slow);
7234  // Go to the slow case if we do not have a function.
7235  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
7236  __ j(not_equal, &slow);
7237
7238  // Fast-case: Just invoke the function.
7239  ParameterCount actual(argc_);
7240  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
7241
7242  // Slow-case: Non-function called.
7243  __ bind(&slow);
7244  __ Set(rax, argc_);
7245  __ Set(rbx, 0);
7246  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
7247  Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
7248  __ Jump(adaptor, RelocInfo::CODE_TARGET);
7249}
7250
7251
7252void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
7253  // rax: number of arguments including receiver
7254  // rbx: pointer to C function  (C callee-saved)
7255  // rbp: frame pointer of calling JS frame (restored after C call)
7256  // rsp: stack pointer  (restored after C call)
7257  // rsi: current context (restored)
7258
7259  // NOTE: Invocations of builtins may return failure objects
7260  // instead of a proper result. The builtin entry handles
7261  // this by performing a garbage collection and retrying the
7262  // builtin once.
7263
7264  ExitFrame::Mode mode = is_debug_break ?
7265      ExitFrame::MODE_DEBUG :
7266      ExitFrame::MODE_NORMAL;
7267
7268  // Enter the exit frame that transitions from JavaScript to C++.
7269  __ EnterExitFrame(mode, result_size_);
7270
7271  // rax: Holds the context at this point, but should not be used.
7272  //      On entry to code generated by GenerateCore, it must hold
7273  //      a failure result if the collect_garbage argument to GenerateCore
7274  //      is true.  This failure result can be the result of code
7275  //      generated by a previous call to GenerateCore.  The value
7276  //      of rax is then passed to Runtime::PerformGC.
7277  // rbx: pointer to builtin function  (C callee-saved).
7278  // rbp: frame pointer of exit frame  (restored after C call).
7279  // rsp: stack pointer (restored after C call).
7280  // r14: number of arguments including receiver (C callee-saved).
7281  // r15: argv pointer (C callee-saved).
7282
7283  Label throw_normal_exception;
7284  Label throw_termination_exception;
7285  Label throw_out_of_memory_exception;
7286
7287  // Call into the runtime system.
7288  GenerateCore(masm,
7289               &throw_normal_exception,
7290               &throw_termination_exception,
7291               &throw_out_of_memory_exception,
7292               mode,
7293               false,
7294               false);
7295
7296  // Do space-specific GC and retry runtime call.
7297  GenerateCore(masm,
7298               &throw_normal_exception,
7299               &throw_termination_exception,
7300               &throw_out_of_memory_exception,
7301               mode,
7302               true,
7303               false);
7304
7305  // Do full GC and retry runtime call one final time.
7306  Failure* failure = Failure::InternalError();
7307  __ movq(rax, failure, RelocInfo::NONE);
7308  GenerateCore(masm,
7309               &throw_normal_exception,
7310               &throw_termination_exception,
7311               &throw_out_of_memory_exception,
7312               mode,
7313               true,
7314               true);
7315
7316  __ bind(&throw_out_of_memory_exception);
7317  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
7318
7319  __ bind(&throw_termination_exception);
7320  GenerateThrowUncatchable(masm, TERMINATION);
7321
7322  __ bind(&throw_normal_exception);
7323  GenerateThrowTOS(masm);
7324}
7325
7326
7327void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
7328  UNREACHABLE();
7329}
7330
7331
7332void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
7333  Label invoke, exit;
7334#ifdef ENABLE_LOGGING_AND_PROFILING
7335  Label not_outermost_js, not_outermost_js_2;
7336#endif
7337
7338  // Setup frame.
7339  __ push(rbp);
7340  __ movq(rbp, rsp);
7341
7342  // Push the stack frame type marker twice.
7343  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
7344  __ Push(Smi::FromInt(marker));  // context slot
7345  __ Push(Smi::FromInt(marker));  // function slot
7346  // Save callee-saved registers (X64 calling conventions).
7347  __ push(r12);
7348  __ push(r13);
7349  __ push(r14);
7350  __ push(r15);
7351  __ push(rdi);
7352  __ push(rsi);
7353  __ push(rbx);
7354  // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
7355  // callee-save in JS code as well.
7356
7357  // Save copies of the top frame descriptor on the stack.
7358  ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
7359  __ load_rax(c_entry_fp);
7360  __ push(rax);
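  // A sketch of the entry frame built so far, relative to rbp:
  //   rbp + 1 * kPointerSize    : return address
  //   rbp + 0                   : saved rbp
  //   rbp - 1 * kPointerSize    : frame type marker (context slot)
  //   rbp - 2 * kPointerSize    : frame type marker (function slot)
  //   rbp - 3..9 * kPointerSize : saved r12, r13, r14, r15, rdi, rsi, rbx
  //   rbp - 10 * kPointerSize   : saved c_entry_fp (top frame descriptor)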
7361
7362#ifdef ENABLE_LOGGING_AND_PROFILING
7363  // If this is the outermost JS call, set js_entry_sp value.
7364  ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
7365  __ load_rax(js_entry_sp);
7366  __ testq(rax, rax);
7367  __ j(not_zero, &not_outermost_js);
7368  __ movq(rax, rbp);
7369  __ store_rax(js_entry_sp);
7370  __ bind(&not_outermost_js);
7371#endif
7372
7373  // Call a faked try-block that does the invoke.
7374  __ call(&invoke);
7375
7376  // Caught exception: Store result (exception) in the pending
7377  // exception field in the JSEnv and return a failure sentinel.
7378  ExternalReference pending_exception(Top::k_pending_exception_address);
7379  __ store_rax(pending_exception);
7380  __ movq(rax, Failure::Exception(), RelocInfo::NONE);
7381  __ jmp(&exit);
7382
7383  // Invoke: Link this frame into the handler chain.
7384  __ bind(&invoke);
7385  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
7386
7387  // Clear any pending exceptions.
7388  __ load_rax(ExternalReference::the_hole_value_location());
7389  __ store_rax(pending_exception);
7390
7391  // Fake a receiver (NULL).
7392  __ push(Immediate(0));  // receiver
7393
7394  // Invoke the function by calling through JS entry trampoline
7395  // builtin and pop the faked function when we return. We load the address
7396  // from an external reference instead of inlining the call target address
7397  // directly in the code, because the builtin stubs may not have been
7398  // generated yet at the time this code is generated.
7399  if (is_construct) {
7400    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
7401    __ load_rax(construct_entry);
7402  } else {
7403    ExternalReference entry(Builtins::JSEntryTrampoline);
7404    __ load_rax(entry);
7405  }
7406  __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
7407  __ call(kScratchRegister);
7408
7409  // Unlink this frame from the handler chain.
7410  __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
7411  __ pop(Operand(kScratchRegister, 0));
7412  // Pop next_sp.
7413  __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
7414
7415#ifdef ENABLE_LOGGING_AND_PROFILING
7416  // If the current RBP value is the same as the js_entry_sp value, it means
7417  // that the current function is the outermost JS call.
7418  __ movq(kScratchRegister, js_entry_sp);
7419  __ cmpq(rbp, Operand(kScratchRegister, 0));
7420  __ j(not_equal, &not_outermost_js_2);
7421  __ movq(Operand(kScratchRegister, 0), Immediate(0));
7422  __ bind(&not_outermost_js_2);
7423#endif
7424
7425  // Restore the top frame descriptor from the stack.
7426  __ bind(&exit);
7427  __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
7428  __ pop(Operand(kScratchRegister, 0));
7429
7430  // Restore callee-saved registers (X64 conventions).
7431  __ pop(rbx);
7432  __ pop(rsi);
7433  __ pop(rdi);
7434  __ pop(r15);
7435  __ pop(r14);
7436  __ pop(r13);
7437  __ pop(r12);
7438  __ addq(rsp, Immediate(2 * kPointerSize));  // remove markers
7439
7440  // Restore frame pointer and return.
7441  __ pop(rbp);
7442  __ ret(0);
7443}
7444
7445
7446// -----------------------------------------------------------------------------
7447// Implementation of stubs.
7448
7449//  Stub classes have a public member named masm, not masm_.
7450
7451void StackCheckStub::Generate(MacroAssembler* masm) {
7452  // Because builtins always remove the receiver from the stack, we
7453  // have to fake one to avoid underflowing the stack. The receiver
7454  // must be inserted below the return address on the stack so we
7455  // temporarily store that in a register.
7456  __ pop(rax);
7457  __ Push(Smi::FromInt(0));
7458  __ push(rax);
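  // The stack now holds, from the top: the return address, then the smi
  // zero acting as the fake receiver consumed by the runtime call below.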
7459
7460  // Do tail-call to runtime routine.
7461  Runtime::Function* f = Runtime::FunctionForId(Runtime::kStackGuard);
7462  __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
7463}
7464
7465
7466void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
7467                                           Register number) {
7468  Label load_smi, done;
7469
7470  __ JumpIfSmi(number, &load_smi);
7471  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
7472  __ jmp(&done);
7473
7474  __ bind(&load_smi);
7475  __ SmiToInteger32(number, number);
7476  __ push(number);
7477  __ fild_s(Operand(rsp, 0));
7478  __ pop(number);
7479
7480  __ bind(&done);
7481}
7482
7483
7484void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
7485                                           Register src,
7486                                           XMMRegister dst) {
7487  Label load_smi, done;
7488
7489  __ JumpIfSmi(src, &load_smi);
7490  __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
7491  __ jmp(&done);
7492
7493  __ bind(&load_smi);
7494  __ SmiToInteger32(src, src);
7495  __ cvtlsi2sd(dst, src);
7496
7497  __ bind(&done);
7498}
7499
7500
7501void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7502                                            XMMRegister dst1,
7503                                            XMMRegister dst2) {
7504  __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
7505  LoadFloatOperand(masm, kScratchRegister, dst1);
7506  __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
7507  LoadFloatOperand(masm, kScratchRegister, dst2);
7508}
7509
7510
7511void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
7512  Label load_smi_1, load_smi_2, done_load_1, done;
7513  __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
7514  __ JumpIfSmi(kScratchRegister, &load_smi_1);
7515  __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
7516  __ bind(&done_load_1);
7517
7518  __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
7519  __ JumpIfSmi(kScratchRegister, &load_smi_2);
7520  __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
7521  __ jmp(&done);
7522
7523  __ bind(&load_smi_1);
7524  __ SmiToInteger32(kScratchRegister, kScratchRegister);
7525  __ push(kScratchRegister);
7526  __ fild_s(Operand(rsp, 0));
7527  __ pop(kScratchRegister);
7528  __ jmp(&done_load_1);
7529
7530  __ bind(&load_smi_2);
7531  __ SmiToInteger32(kScratchRegister, kScratchRegister);
7532  __ push(kScratchRegister);
7533  __ fild_s(Operand(rsp, 0));
7534  __ pop(kScratchRegister);
7535
7536  __ bind(&done);
7537}
7538
7539
7540// Input: rdx, rax are the left and right objects of a bit op.
7541// Output: rax, rcx are left and right integers for a bit op.
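// A sketch of the data flow: evaluating (2.5 | 1) arrives here with rdx
// holding a heap number for 2.5 and rax holding the smi 1; the heap number
// is truncated (ToInt32, ECMA-262 section 9.5), so on exit rax holds 2 and
// rcx holds 1.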
7542void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
7543                                         bool use_sse3,
7544                                         Label* conversion_failure) {
7545  // Check float operands.
7546  Label arg1_is_object, check_undefined_arg1;
7547  Label arg2_is_object, check_undefined_arg2;
7548  Label load_arg2, done;
7549
7550  __ JumpIfNotSmi(rdx, &arg1_is_object);
7551  __ SmiToInteger32(rdx, rdx);
7552  __ jmp(&load_arg2);
7553
7554  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
7555  __ bind(&check_undefined_arg1);
7556  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
7557  __ j(not_equal, conversion_failure);
7558  __ movl(rdx, Immediate(0));
7559  __ jmp(&load_arg2);
7560
7561  __ bind(&arg1_is_object);
7562  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
7563  __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
7564  __ j(not_equal, &check_undefined_arg1);
7565  // Get the untagged integer version of the rdx heap number in rcx.
7566  IntegerConvert(masm, rdx, use_sse3, conversion_failure);
7567  __ movl(rdx, rcx);
7568
7569  // Here rdx has the untagged integer and rax has a smi or a heap number.
7570  __ bind(&load_arg2);
7571  // Test if arg2 is a Smi.
7572  __ JumpIfNotSmi(rax, &arg2_is_object);
7573  __ SmiToInteger32(rax, rax);
7574  __ movl(rcx, rax);
7575  __ jmp(&done);
7576
7577  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
7578  __ bind(&check_undefined_arg2);
7579  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
7580  __ j(not_equal, conversion_failure);
7581  __ movl(rcx, Immediate(0));
7582  __ jmp(&done);
7583
7584  __ bind(&arg2_is_object);
7585  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
7586  __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
7587  __ j(not_equal, &check_undefined_arg2);
7588  // Get the untagged integer version of the rax heap number in rcx.
7589  IntegerConvert(masm, rax, use_sse3, conversion_failure);
7590  __ bind(&done);
7591  __ movl(rax, rdx);
7592}
7593
7594
7595void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7596                                            Register lhs,
7597                                            Register rhs) {
7598  Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
7599  __ JumpIfSmi(lhs, &load_smi_lhs);
7600  __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
7601  __ bind(&done_load_lhs);
7602
7603  __ JumpIfSmi(rhs, &load_smi_rhs);
7604  __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
7605  __ jmp(&done);
7606
7607  __ bind(&load_smi_lhs);
7608  __ SmiToInteger64(kScratchRegister, lhs);
7609  __ push(kScratchRegister);
7610  __ fild_d(Operand(rsp, 0));
7611  __ pop(kScratchRegister);
7612  __ jmp(&done_load_lhs);
7613
7614  __ bind(&load_smi_rhs);
7615  __ SmiToInteger64(kScratchRegister, rhs);
7616  __ push(kScratchRegister);
7617  __ fild_d(Operand(rsp, 0));
7618  __ pop(kScratchRegister);
7619
7620  __ bind(&done);
7621}
7622
7623
7624void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
7625                                              Label* non_float) {
7626  Label test_other, done;
7627  // Test if both operands are numbers (heap_numbers or smis).
7628  // If not, jump to label non_float.
7629  __ JumpIfSmi(rdx, &test_other);  // argument in rdx is OK
7630  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
7631  __ j(not_equal, non_float);  // The argument in rdx is not a number.
7632
7633  __ bind(&test_other);
7634  __ JumpIfSmi(rax, &done);  // argument in rax is OK
7635  __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
7636  __ j(not_equal, non_float);  // The argument in rax is not a number.
7637
7638  // Fall-through: Both operands are numbers.
7639  __ bind(&done);
7640}
7641
7642
7643const char* GenericBinaryOpStub::GetName() {
7644  if (name_ != NULL) return name_;
7645  const int len = 100;
7646  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
7647  if (name_ == NULL) return "OOM";
7648  const char* op_name = Token::Name(op_);
7649  const char* overwrite_name;
7650  switch (mode_) {
7651    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
7652    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
7653    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
7654    default: overwrite_name = "UnknownOverwrite"; break;
7655  }
7656
7657  OS::SNPrintF(Vector<char>(name_, len),
7658               "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
7659               op_name,
7660               overwrite_name,
7661               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
7662               args_in_registers_ ? "RegArgs" : "StackArgs",
7663               args_reversed_ ? "_R" : "",
7664               use_sse3_ ? "SSE3" : "SSE2");
7665  return name_;
7666}
7667
7668
7669void GenericBinaryOpStub::GenerateCall(
7670    MacroAssembler* masm,
7671    Register left,
7672    Register right) {
7673  if (!ArgsInRegistersSupported()) {
7674    // Pass arguments on the stack.
7675    __ push(left);
7676    __ push(right);
7677  } else {
7678    // The calling convention with registers is left in rdx and right in rax.
7679    Register left_arg = rdx;
7680    Register right_arg = rax;
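    // Example: a caller passing left in rax and right in rdx hits the
    // swapped case below; a commutative op just records the reversal,
    // while a non-commutative op emits an xchg to restore the convention.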
7681    if (!(left.is(left_arg) && right.is(right_arg))) {
7682      if (left.is(right_arg) && right.is(left_arg)) {
7683        if (IsOperationCommutative()) {
7684          SetArgsReversed();
7685        } else {
7686          __ xchg(left, right);
7687        }
7688      } else if (left.is(left_arg)) {
7689        __ movq(right_arg, right);
7690      } else if (left.is(right_arg)) {
7691        if (IsOperationCommutative()) {
7692          __ movq(left_arg, right);
7693          SetArgsReversed();
7694        } else {
7695          // Order of moves is important to avoid destroying left argument.
7696          __ movq(left_arg, left);
7697          __ movq(right_arg, right);
7698        }
7699      } else if (right.is(left_arg)) {
7700        if (IsOperationCommutative()) {
7701          __ movq(right_arg, left);
7702          SetArgsReversed();
7703        } else {
7704          // Order of moves important to avoid destroying right argument.
7705          __ movq(right_arg, right);
7706          __ movq(left_arg, left);
7707        }
7708      } else if (right.is(right_arg)) {
7709        __ movq(left_arg, left);
7710      } else {
7711        // Order of moves is not important.
7712        __ movq(left_arg, left);
7713        __ movq(right_arg, right);
7714      }
7715    }
7716
7717    // Update flags to indicate that arguments are in registers.
7718    SetArgsInRegisters();
7719    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
7720  }
7721
7722  // Call the stub.
7723  __ CallStub(this);
7724}
7725
7726
7727void GenericBinaryOpStub::GenerateCall(
7728    MacroAssembler* masm,
7729    Register left,
7730    Smi* right) {
7731  if (!ArgsInRegistersSupported()) {
7732    // Pass arguments on the stack.
7733    __ push(left);
7734    __ Push(right);
7735  } else {
7736    // The calling convention with registers is left in rdx and right in rax.
7737    Register left_arg = rdx;
7738    Register right_arg = rax;
7739    if (left.is(left_arg)) {
7740      __ Move(right_arg, right);
7741    } else if (left.is(right_arg) && IsOperationCommutative()) {
7742      __ Move(left_arg, right);
7743      SetArgsReversed();
7744    } else {
7745      __ movq(left_arg, left);
7746      __ Move(right_arg, right);
7747    }
7748
7749    // Update flags to indicate that arguments are in registers.
7750    SetArgsInRegisters();
7751    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
7752  }
7753
7754  // Call the stub.
7755  __ CallStub(this);
7756}
7757
7758
7759void GenericBinaryOpStub::GenerateCall(
7760    MacroAssembler* masm,
7761    Smi* left,
7762    Register right) {
7763  if (!ArgsInRegistersSupported()) {
7764    // Pass arguments on the stack.
7765    __ Push(left);
7766    __ push(right);
7767  } else {
7768    // The calling convention with registers is left in rdx and right in rax.
7769    Register left_arg = rdx;
7770    Register right_arg = rax;
7771    if (right.is(right_arg)) {
7772      __ Move(left_arg, left);
7773    } else if (right.is(left_arg) && IsOperationCommutative()) {
7774      __ Move(right_arg, left);
7775      SetArgsReversed();
7776    } else {
7777      __ Move(left_arg, left);
7778      __ movq(right_arg, right);
7779    }
7780    // Update flags to indicate that arguments are in registers.
7781    SetArgsInRegisters();
7782    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
7783  }
7784
7785  // Call the stub.
7786  __ CallStub(this);
7787}
7788
7789
7790void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
7791  // Perform fast-case smi code for the operation (rax <op> rbx) and
7792  // leave result in register rax.
7793
7794  // Smi check both operands.
7795  __ JumpIfNotBothSmi(rax, rbx, slow);
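  // A sketch of the smi representation this relies on (assuming the x64
  // kSmiShift == 32 scheme): a smi keeps its 32-bit payload in the upper
  // word with zeros below, so e.g. 5 is tagged as 0x0000000500000000 and
  // the helpers below can add, subtract and compare whole 64-bit words.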
7796
7797  switch (op_) {
7798    case Token::ADD: {
7799      __ SmiAdd(rax, rax, rbx, slow);
7800      break;
7801    }
7802
7803    case Token::SUB: {
7804      __ SmiSub(rax, rax, rbx, slow);
7805      break;
7806    }
7807
7808    case Token::MUL:
7809      __ SmiMul(rax, rax, rbx, slow);
7810      break;
7811
7812    case Token::DIV:
7813      __ SmiDiv(rax, rax, rbx, slow);
7814      break;
7815
7816    case Token::MOD:
7817      __ SmiMod(rax, rax, rbx, slow);
7818      break;
7819
7820    case Token::BIT_OR:
7821      __ SmiOr(rax, rax, rbx);
7822      break;
7823
7824    case Token::BIT_AND:
7825      __ SmiAnd(rax, rax, rbx);
7826      break;
7827
7828    case Token::BIT_XOR:
7829      __ SmiXor(rax, rax, rbx);
7830      break;
7831
7832    case Token::SHL:
7833    case Token::SHR:
7834    case Token::SAR:
7835      // Move the second operand into register rcx.
7836      __ movq(rcx, rbx);
7837      // Perform the operation.
7838      switch (op_) {
7839        case Token::SAR:
7840          __ SmiShiftArithmeticRight(rax, rax, rcx);
7841          break;
7842        case Token::SHR:
7843          __ SmiShiftLogicalRight(rax, rax, rcx, slow);
7844          break;
7845        case Token::SHL:
7846          __ SmiShiftLeft(rax, rax, rcx, slow);
7847          break;
7848        default:
7849          UNREACHABLE();
7850      }
7851      break;
7852
7853    default:
7854      UNREACHABLE();
7855      break;
7856  }
7857}
7858
7859
7860void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
7861  Label call_runtime;
7862  if (HasSmiCodeInStub()) {
7863    // The fast case smi code wasn't inlined in the stub caller
7864    // code. Generate it here to speed up common operations.
7865    Label slow;
7866    __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // get y
7867    __ movq(rax, Operand(rsp, 2 * kPointerSize));  // get x
7868    GenerateSmiCode(masm, &slow);
7869    GenerateReturn(masm);
7870
7871    // Too bad. The fast case smi code didn't succeed.
7872    __ bind(&slow);
7873  }
7874
7875  // Make sure the arguments are in rdx and rax.
7876  GenerateLoadArguments(masm);
7877
7878  // Floating point case.
7879  switch (op_) {
7880    case Token::ADD:
7881    case Token::SUB:
7882    case Token::MUL:
7883    case Token::DIV: {
7884      // rax: y
7885      // rdx: x
7886      FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
7887      // Fast-case: Both operands are numbers.
7888      // Allocate a heap number, if needed.
7889      Label skip_allocation;
7890      switch (mode_) {
7891        case OVERWRITE_LEFT:
7892          __ movq(rax, rdx);
7893          // Fall through!
7894        case OVERWRITE_RIGHT:
7895          // If the argument in rax is already an object, we skip the
7896          // allocation of a heap number.
7897          __ JumpIfNotSmi(rax, &skip_allocation);
7898          // Fall through!
7899        case NO_OVERWRITE:
7900          // Allocate a heap number for the result. Keep rax and rdx intact
7901          // for the possible runtime call.
7902          __ AllocateHeapNumber(rbx, rcx, &call_runtime);
7903          __ movq(rax, rbx);
7904          __ bind(&skip_allocation);
7905          break;
7906        default: UNREACHABLE();
7907      }
7908      // xmm4 and xmm5 are volatile XMM registers.
7909      FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
7910
7911      switch (op_) {
7912        case Token::ADD: __ addsd(xmm4, xmm5); break;
7913        case Token::SUB: __ subsd(xmm4, xmm5); break;
7914        case Token::MUL: __ mulsd(xmm4, xmm5); break;
7915        case Token::DIV: __ divsd(xmm4, xmm5); break;
7916        default: UNREACHABLE();
7917      }
7918      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
7919      GenerateReturn(masm);
7920    }
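    // C++ control falls through to the MOD case below, which just breaks;
    // the emitted code path already ended with the GenerateReturn above.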
7921    case Token::MOD: {
7922      // For MOD we go directly to runtime in the non-smi case.
7923      break;
7924    }
7925    case Token::BIT_OR:
7926    case Token::BIT_AND:
7927    case Token::BIT_XOR:
7928    case Token::SAR:
7929    case Token::SHL:
7930    case Token::SHR: {
7931      Label skip_allocation, non_smi_result;
7932      FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
7933      switch (op_) {
7934        case Token::BIT_OR:  __ orl(rax, rcx); break;
7935        case Token::BIT_AND: __ andl(rax, rcx); break;
7936        case Token::BIT_XOR: __ xorl(rax, rcx); break;
7937        case Token::SAR: __ sarl_cl(rax); break;
7938        case Token::SHL: __ shll_cl(rax); break;
7939        case Token::SHR: __ shrl_cl(rax); break;
7940        default: UNREACHABLE();
7941      }
7942      if (op_ == Token::SHR) {
7943        // Check if the result is non-negative. A negative result can only
7944        // arise from a shift by zero, which also leaves the flags unchanged.
7945        __ testl(rax, rax);
7946        __ j(negative, &non_smi_result);
7947      }
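      // Example: (-1 >>> 0) leaves 0xffffffff in rax; the sign test above
      // routes it to non_smi_result, and since SHR must produce a uint32
      // the stub reloads the arguments and falls back to the runtime
      // instead of boxing the signed bit pattern in a heap number.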
7948      __ JumpIfNotValidSmiValue(rax, &non_smi_result);
7949      // Tag smi result, if possible, and return.
7950      __ Integer32ToSmi(rax, rax);
7951      GenerateReturn(masm);
7952
7953      // All ops except SHR return a signed int32 that we store in a HeapNumber.
7954      if (op_ != Token::SHR && non_smi_result.is_linked()) {
7955        __ bind(&non_smi_result);
7956        // Allocate a heap number if needed.
7957        __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
7958        switch (mode_) {
7959          case OVERWRITE_LEFT:
7960          case OVERWRITE_RIGHT:
7961            // If the operand was an object, we skip the
7962            // allocation of a heap number.
7963            __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
7964                                 1 * kPointerSize : 2 * kPointerSize));
7965            __ JumpIfNotSmi(rax, &skip_allocation);
7966            // Fall through!
7967          case NO_OVERWRITE:
7968            __ AllocateHeapNumber(rax, rcx, &call_runtime);
7969            __ bind(&skip_allocation);
7970            break;
7971          default: UNREACHABLE();
7972        }
7973        // Store the result in the HeapNumber and return.
7974        __ movq(Operand(rsp, 1 * kPointerSize), rbx);
7975        __ fild_s(Operand(rsp, 1 * kPointerSize));
7976        __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
7977        GenerateReturn(masm);
7978      }
7979
7980      // SHR should return uint32 - go to runtime for non-smi/negative result.
7981      if (op_ == Token::SHR) {
7982        __ bind(&non_smi_result);
7983      }
7984      __ movq(rax, Operand(rsp, 1 * kPointerSize));
7985      __ movq(rdx, Operand(rsp, 2 * kPointerSize));
7986      break;
7987    }
7988    default: UNREACHABLE(); break;
7989  }
7990
7991  // If all else fails, use the runtime system to get the correct
7992  // result. If arguments were passed in registers, place them on the
7993  // stack in the correct order below the return address.
7994  __ bind(&call_runtime);
7995  if (HasArgumentsInRegisters()) {
7996    __ pop(rcx);
7997    if (HasArgumentsReversed()) {
7998      __ push(rax);
7999      __ push(rdx);
8000    } else {
8001      __ push(rdx);
8002      __ push(rax);
8003    }
8004    __ push(rcx);
8005  }
8006  switch (op_) {
8007    case Token::ADD: {
8008      // Test for string arguments before calling runtime.
8009      Label not_strings, both_strings, not_string1, string1;
8010      Condition is_smi;
8011      Result answer;
8012      __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // First argument.
8013      __ movq(rax, Operand(rsp, 1 * kPointerSize));  // Second argument.
8014      is_smi = masm->CheckSmi(rdx);
8015      __ j(is_smi, &not_string1);
8016      __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx);
8017      __ j(above_equal, &not_string1);
8018
8019      // First argument is a string, test the second.
8020      is_smi = masm->CheckSmi(rax);
8021      __ j(is_smi, &string1);
8022      __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
8023      __ j(above_equal, &string1);
8024
8025      // First and second argument are strings.
8026      StringAddStub stub(NO_STRING_CHECK_IN_STUB);
8027      __ TailCallStub(&stub);
8028
8029      // Only first argument is a string.
8030      __ bind(&string1);
8031      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
8032
8033      // First argument was not a string, test second.
8034      __ bind(&not_string1);
8035      is_smi = masm->CheckSmi(rax);
8036      __ j(is_smi, &not_strings);
8037      __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
8038      __ j(above_equal, &not_strings);
8039
8040      // Only second argument is a string.
8041      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
8042
8043      __ bind(&not_strings);
8044      // Neither argument is a string.
8045      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
8046      break;
8047    }
8048    case Token::SUB:
8049      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
8050      break;
8051    case Token::MUL:
8052      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
8053      break;
8054    case Token::DIV:
8055      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
8056      break;
8057    case Token::MOD:
8058      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
8059      break;
8060    case Token::BIT_OR:
8061      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
8062      break;
8063    case Token::BIT_AND:
8064      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
8065      break;
8066    case Token::BIT_XOR:
8067      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
8068      break;
8069    case Token::SAR:
8070      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
8071      break;
8072    case Token::SHL:
8073      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
8074      break;
8075    case Token::SHR:
8076      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
8077      break;
8078    default:
8079      UNREACHABLE();
8080  }
8081}
8082
8083
8084void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
8085  // If arguments are not passed in registers, read them from the stack.
8086  if (!HasArgumentsInRegisters()) {
8087    __ movq(rax, Operand(rsp, 1 * kPointerSize));
8088    __ movq(rdx, Operand(rsp, 2 * kPointerSize));
8089  }
8090}
8091
8092
8093void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
8094  // If arguments are not passed in registers, remove them from the stack
8095  // before returning.
8096  if (!HasArgumentsInRegisters()) {
8097    __ ret(2 * kPointerSize);  // Remove both operands
8098  } else {
8099    __ ret(0);
8100  }
8101}
8102
8103
8104int CompareStub::MinorKey() {
8105  // Encode the three parameters in a unique 16-bit value.
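  // Key layout implied by the expression below: bit 0 is strict_, bit 1 is
  // never_nan_nan_ (kept only for equality to avoid duplicate stubs), and
  // bits 2 and up hold the condition code.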
8106  ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
8107  int nnn_value = (never_nan_nan_ ? 2 : 0);
8108  if (cc_ != equal) nnn_value = 0;  // Avoid duplicate stubs.
8109  return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0);
8110}
8111
8112
8113const char* CompareStub::GetName() {
8114  switch (cc_) {
8115    case less: return "CompareStub_LT";
8116    case greater: return "CompareStub_GT";
8117    case less_equal: return "CompareStub_LE";
8118    case greater_equal: return "CompareStub_GE";
8119    case not_equal: {
8120      if (strict_) {
8121        if (never_nan_nan_) {
8122          return "CompareStub_NE_STRICT_NO_NAN";
8123        } else {
8124          return "CompareStub_NE_STRICT";
8125        }
8126      } else {
8127        if (never_nan_nan_) {
8128          return "CompareStub_NE_NO_NAN";
8129        } else {
8130          return "CompareStub_NE";
8131        }
8132      }
8133    }
8134    case equal: {
8135      if (strict_) {
8136        if (never_nan_nan_) {
8137          return "CompareStub_EQ_STRICT_NO_NAN";
8138        } else {
8139          return "CompareStub_EQ_STRICT";
8140        }
8141      } else {
8142        if (never_nan_nan_) {
8143          return "CompareStub_EQ_NO_NAN";
8144        } else {
8145          return "CompareStub_EQ";
8146        }
8147      }
8148    }
8149    default: return "CompareStub";
8150  }
8151}
8152
8153
8154void StringAddStub::Generate(MacroAssembler* masm) {
8155  Label string_add_runtime;
8156
8157  // Load the two arguments.
8158  __ movq(rax, Operand(rsp, 2 * kPointerSize));  // First argument.
8159  __ movq(rdx, Operand(rsp, 1 * kPointerSize));  // Second argument.
8160
8161  // Make sure that both arguments are strings if not known in advance.
8162  if (string_check_) {
8163    Condition is_smi;
8164    is_smi = masm->CheckSmi(rax);
8165    __ j(is_smi, &string_add_runtime);
8166    __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
8167    __ j(above_equal, &string_add_runtime);
8168
8169    // First argument is a string, test the second.
8170    is_smi = masm->CheckSmi(rdx);
8171    __ j(is_smi, &string_add_runtime);
8172    __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
8173    __ j(above_equal, &string_add_runtime);
8174  }
8175
8176  // Both arguments are strings.
8177  // rax: first string
8178  // rdx: second string
8179  // Check if either of the strings is empty. In that case return the other.
8180  Label second_not_zero_length, both_not_zero_length;
8181  __ movl(rcx, FieldOperand(rdx, String::kLengthOffset));
8182  __ testl(rcx, rcx);
8183  __ j(not_zero, &second_not_zero_length);
8184  // Second string is empty, result is first string which is already in rax.
8185  __ IncrementCounter(&Counters::string_add_native, 1);
8186  __ ret(2 * kPointerSize);
8187  __ bind(&second_not_zero_length);
8188  __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
8189  __ testl(rbx, rbx);
8190  __ j(not_zero, &both_not_zero_length);
8191  // First string is empty, result is second string which is in rdx.
8192  __ movq(rax, rdx);
8193  __ IncrementCounter(&Counters::string_add_native, 1);
8194  __ ret(2 * kPointerSize);
8195
8196  // Both strings are non-empty.
8197  // rax: first string
8198  // rbx: length of first string
8199  // rcx: length of second string
8200  // rdx: second string
8201  // r8: instance type of first string if string check was performed above
8202  // r9: instance type of second string if string check was performed above
8203  Label string_add_flat_result;
8204  __ bind(&both_not_zero_length);
8205  // Look at the length of the result of adding the two strings.
8206  __ addl(rbx, rcx);
8207  // Use the runtime system when adding two one-character strings, as it
8208  // contains optimizations for this specific case using the symbol table.
8209  __ cmpl(rbx, Immediate(2));
8210  __ j(equal, &string_add_runtime);
8211  // If arguments were known to be strings, maps are not loaded to r8 and r9
8212  // by the code above.
8213  if (!string_check_) {
8214    __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
8215    __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
8216  }
8217  // Get the instance types of the two strings as they will be needed soon.
8218  __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
8219  __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
8220  // Check if resulting string will be flat.
8221  __ cmpl(rbx, Immediate(String::kMinNonFlatLength));
8222  __ j(below, &string_add_flat_result);
8223  // Handle exceptionally long strings in the runtime system.
8224  ASSERT((String::kMaxLength & 0x80000000) == 0);
8225  __ cmpl(rbx, Immediate(String::kMaxLength));
8226  __ j(above, &string_add_runtime);
8227
8228  // If result is not supposed to be flat, allocate a cons string object. If
8229  // both strings are ascii the result is an ascii cons string.
8230  // rax: first string
8231  // rbx: length of resulting flat string
8232  // rdx: second string
8233  // r8: instance type of first string
8234  // r9: instance type of second string
8235  Label non_ascii, allocated;
8236  __ movl(rcx, r8);
8237  __ and_(rcx, r9);
8238  ASSERT(kStringEncodingMask == kAsciiStringTag);
8239  __ testl(rcx, Immediate(kAsciiStringTag));
8240  __ j(zero, &non_ascii);
8241  // Allocate an ascii cons string.
8242  __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
8243  __ bind(&allocated);
8244  // Fill the fields of the cons string.
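  // A cons string, as assumed here, stores a length, a hash field and two
  // pointers to the original strings; the characters themselves are only
  // copied if the string is later flattened.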
8245  __ movl(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
8246  __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
8247          Immediate(String::kEmptyHashField));
8248  __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
8249  __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
8250  __ movq(rax, rcx);
8251  __ IncrementCounter(&Counters::string_add_native, 1);
8252  __ ret(2 * kPointerSize);
8253  __ bind(&non_ascii);
8254  // Allocate a two byte cons string.
8255  __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
8256  __ jmp(&allocated);
8257
8258  // Handle creating a flat result. First check that both strings are not
8259  // external strings.
8260  // rax: first string
8261  // rbx: length of resulting flat string
8262  // rdx: second string
8263  // r8: instance type of first string
8264  // r9: instance type of second string
8265  __ bind(&string_add_flat_result);
8266  __ movl(rcx, r8);
8267  __ and_(rcx, Immediate(kStringRepresentationMask));
8268  __ cmpl(rcx, Immediate(kExternalStringTag));
8269  __ j(equal, &string_add_runtime);
8270  __ movl(rcx, r9);
8271  __ and_(rcx, Immediate(kStringRepresentationMask));
8272  __ cmpl(rcx, Immediate(kExternalStringTag));
8273  __ j(equal, &string_add_runtime);
8274  // Now check if both strings are ascii strings.
8275  // rax: first string
8276  // rbx: length of resulting flat string
8277  // rdx: second string
8278  // r8: instance type of first string
8279  // r9: instance type of second string
8280  Label non_ascii_string_add_flat_result;
8281  ASSERT(kStringEncodingMask == kAsciiStringTag);
8282  __ testl(r8, Immediate(kAsciiStringTag));
8283  __ j(zero, &non_ascii_string_add_flat_result);
8284  __ testl(r9, Immediate(kAsciiStringTag));
8285  __ j(zero, &string_add_runtime);
8286  // Both strings are ascii strings. As they are short they are both flat.
8287  __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
8288  // rcx: result string
8289  __ movq(rbx, rcx);
8290  // Locate first character of result.
8291  __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
8292  // Locate first character of first argument
8293  __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
8294  __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
8295  // rax: first char of first argument
8296  // rbx: result string
8297  // rcx: first character of result
8298  // rdx: second string
8299  // rdi: length of first argument
8300  GenerateCopyCharacters(masm, rcx, rax, rdi, true);
8301  // Locate first character of second argument.
8302  __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
8303  __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
8304  // rbx: result string
8305  // rcx: next character of result
8306  // rdx: first char of second argument
8307  // rdi: length of second argument
8308  GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
8309  __ movq(rax, rbx);
8310  __ IncrementCounter(&Counters::string_add_native, 1);
8311  __ ret(2 * kPointerSize);
8312
8313  // Handle creating a flat two byte result.
8314  // rax: first string - known to be two byte
8315  // rbx: length of resulting flat string
8316  // rdx: second string
8317  // r8: instance type of first string
8318  // r9: instance type of second string
8319  __ bind(&non_ascii_string_add_flat_result);
8320  __ and_(r9, Immediate(kAsciiStringTag));
8321  __ j(not_zero, &string_add_runtime);
8322  // Both strings are two byte strings. As they are short they are both
8323  // flat.
8324  __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
8325  // rcx: result string
8326  __ movq(rbx, rcx);
8327  // Locate first character of result.
8328  __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
8329  // Locate first character of first argument.
8330  __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
8331  __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
8332  // rax: first char of first argument
8333  // rbx: result string
8334  // rcx: first character of result
8335  // rdx: second argument
8336  // rdi: length of first argument
8337  GenerateCopyCharacters(masm, rcx, rax, rdi, false);
8338  // Locate first character of second argument.
8339  __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
8340  __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
8341  // rbx: result string
8342  // rcx: next character of result
8343  // rdx: first char of second argument
8344  // rdi: length of second argument
8345  GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
8346  __ movq(rax, rbx);
8347  __ IncrementCounter(&Counters::string_add_native, 1);
8348  __ ret(2 * kPointerSize);
8349
8350  // Just jump to runtime to add the two strings.
8351  __ bind(&string_add_runtime);
8352  __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
8353}
8354
8355
8356void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
8357                                            Register dest,
8358                                            Register src,
8359                                            Register count,
8360                                            bool ascii) {
8361  Label loop;
8362  __ bind(&loop);
8363  // This loop just copies one character at a time, as it is only used for very
8364  // short strings.
8365  if (ascii) {
8366    __ movb(kScratchRegister, Operand(src, 0));
8367    __ movb(Operand(dest, 0), kScratchRegister);
8368    __ addq(src, Immediate(1));
8369    __ addq(dest, Immediate(1));
8370  } else {
8371    __ movzxwl(kScratchRegister, Operand(src, 0));
8372    __ movw(Operand(dest, 0), kScratchRegister);
8373    __ addq(src, Immediate(2));
8374    __ addq(dest, Immediate(2));
8375  }
8376  __ subl(count, Immediate(1));
8377  __ j(not_zero, &loop);
8378}
8379
8380
8381void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
8382                                               Register dest,
8383                                               Register src,
8384                                               Register count,
8385                                               bool ascii) {
8386  // Copy characters using rep movs of quadwords. Copy any remaining bytes
8387  // with a byte-wise loop after running rep movs, since the byte count need
8388  // not be a multiple of eight.
8389  ASSERT(dest.is(rdi));  // rep movs destination
8390  ASSERT(src.is(rsi));  // rep movs source
8391  ASSERT(count.is(rcx));  // rep movs count
8392
8393  // Nothing to do for zero characters.
8394  Label done;
8395  __ testq(count, count);
8396  __ j(zero, &done);
8397
8398  // Make count the number of bytes to copy.
8399  if (!ascii) {
8400    ASSERT_EQ(2, sizeof(uc16));  // NOLINT
8401    __ addq(count, count);
8402  }
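  // Worked example: copying 13 two-byte characters gives count = 26 bytes;
  // the rep movsq below then moves three quadwords (24 bytes) and the
  // trailing byte loop copies the remaining two bytes.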
8403
8404  // Don't enter the rep movs if there are fewer than 8 bytes to copy.
8405  Label last_bytes;
8406  __ testq(count, Immediate(~7));
8407  __ j(zero, &last_bytes);
8408
8409  // Copy from rsi to rdi using the rep movs instruction.
8410  __ movq(kScratchRegister, count);
8411  __ sar(count, Immediate(3));  // Number of quadwords to copy.
8412  __ repmovsq();
8413
8414  // Find number of bytes left.
8415  __ movq(count, kScratchRegister);
8416  __ and_(count, Immediate(7));
8417
8418  // Check if there are more bytes to copy.
8419  __ bind(&last_bytes);
8420  __ testq(count, count);
8421  __ j(zero, &done);
8422
8423  // Copy remaining characters.
8424  Label loop;
8425  __ bind(&loop);
8426  __ movb(kScratchRegister, Operand(src, 0));
8427  __ movb(Operand(dest, 0), kScratchRegister);
8428  __ addq(src, Immediate(1));
8429  __ addq(dest, Immediate(1));
8430  __ subq(count, Immediate(1));
8431  __ j(not_zero, &loop);
8432
8433  __ bind(&done);
8434}
8435
8436
8437void SubStringStub::Generate(MacroAssembler* masm) {
8438  Label runtime;
8439
8440  // Stack frame on entry.
8441  //  rsp[0]: return address
8442  //  rsp[8]: to
8443  //  rsp[16]: from
8444  //  rsp[24]: string
8445
8446  const int kToOffset = 1 * kPointerSize;
8447  const int kFromOffset = kToOffset + kPointerSize;
8448  const int kStringOffset = kFromOffset + kPointerSize;
8449  const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
8450
8451  // Make sure first argument is a string.
8452  __ movq(rax, Operand(rsp, kStringOffset));
8453  ASSERT_EQ(0, kSmiTag);
8454  __ testl(rax, Immediate(kSmiTagMask));
8455  __ j(zero, &runtime);
8456  Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
8457  __ j(NegateCondition(is_string), &runtime);
8458
8459  // rax: string
8460  // rbx: instance type
8461  // Calculate length of sub string using the smi values.
8462  __ movq(rcx, Operand(rsp, kToOffset));
8463  __ movq(rdx, Operand(rsp, kFromOffset));
8464  __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
8465
8466  __ SmiSub(rcx, rcx, rdx, NULL);  // Overflow doesn't happen.
8467  __ j(negative, &runtime);
8468  // Handle sub-strings of length 2 and less in the runtime system.
8469  __ SmiToInteger32(rcx, rcx);
8470  __ cmpl(rcx, Immediate(2));
8471  __ j(below_equal, &runtime);
8472
8473  // rax: string
8474  // rbx: instance type
8475  // rcx: result string length
8476  // Check for flat ascii string
8477  Label non_ascii_flat;
8478  __ and_(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
8479  __ cmpb(rbx, Immediate(kSeqStringTag | kAsciiStringTag));
8480  __ j(not_equal, &non_ascii_flat);
8481
8482  // Allocate the result.
8483  __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
8484
8485  // rax: result string
8486  // rcx: result string length
8487  __ movq(rdx, rsi);  // rsi used by following code.
8488  // Locate first character of result.
8489  __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
8490  // Load string argument and locate character of sub string start.
8491  __ movq(rsi, Operand(rsp, kStringOffset));
8492  __ movq(rbx, Operand(rsp, kFromOffset));
8493  {
8494    SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
8495    __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
8496                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
8497  }
8498
8499  // rax: result string
8500  // rcx: result length
8501  // rdx: original value of rsi
8502  // rdi: first character of result
8503  // rsi: character of sub string start
8504  GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
8505  __ movq(rsi, rdx);  // Restore rsi.
8506  __ IncrementCounter(&Counters::sub_string_native, 1);
8507  __ ret(kArgumentsSize);
8508
8509  __ bind(&non_ascii_flat);
8510  // rax: string
8511  // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
8512  // rcx: result string length
8513  // Check for sequential two byte string
8514  __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
8515  __ j(not_equal, &runtime);
8516
8517  // Allocate the result.
8518  __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
8519
8520  // rax: result string
8521  // rcx: result string length
8522  __ movq(rdx, rsi);  // rsi used by following code.
8523  // Locate first character of result.
8524  __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
8525  // Load string argument and locate character of sub string start.
8526  __ movq(rsi, Operand(rsp, kStringOffset));
8527  __ movq(rbx, Operand(rsp, kFromOffset));
8528  {
8529    SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
8530    __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
8531                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
8532  }
8533
8534  // rax: result string
8535  // rcx: result length
8536  // rdx: original value of rsi
8537  // rdi: first character of result
8538  // rsi: character of sub string start
8539  GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
8540  __ movq(rsi, rdx);  // Restore rsi.
8541  __ IncrementCounter(&Counters::sub_string_native, 1);
8542  __ ret(kArgumentsSize);
8543
8544  // Just jump to runtime to create the sub string.
8545  __ bind(&runtime);
8546  __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
8547}
8548
8549
8550void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
8551                                                        Register left,
8552                                                        Register right,
8553                                                        Register scratch1,
8554                                                        Register scratch2,
8555                                                        Register scratch3,
8556                                                        Register scratch4) {
8557  // Ensure that you can always subtract a string length from a non-negative
8558  // number (e.g. another length).
8559  ASSERT(String::kMaxLength < 0x7fffffff);
8560
8561  // Find minimum length and length difference.
8562  __ movl(scratch1, FieldOperand(left, String::kLengthOffset));
8563  __ movl(scratch4, scratch1);
8564  __ subl(scratch4, FieldOperand(right, String::kLengthOffset));
8565  // Register scratch4 now holds left.length - right.length.
8566  const Register length_difference = scratch4;
8567  Label left_shorter;
8568  __ j(less, &left_shorter);
8569  // The right string isn't longer than the left one.
8570  // Get the right string's length by subtracting the (non-negative) difference
8571  // from the left string's length.
8572  __ subl(scratch1, length_difference);
8573  __ bind(&left_shorter);
8574  // Register scratch1 now holds Min(left.length, right.length).
8575  const Register min_length = scratch1;
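  // Worked example: comparing "apple" (length 5, left) with "app" (length
  // 3, right) gives length_difference = 2 and min_length = 5 - 2 = 3; the
  // loop below finds the first three characters equal, so the positive
  // length difference makes the final result GREATER.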
8576
8577  Label compare_lengths;
8578  // If min-length is zero, go directly to comparing lengths.
8579  __ testl(min_length, min_length);
8580  __ j(zero, &compare_lengths);
8581
8582  // Registers scratch2 and scratch3 are free.
8583  Label result_not_equal;
8584  Label loop;
8585  {
8586    // Check characters 0 .. min_length - 1 in a loop.
8587    // Use scratch3 as loop index, min_length as limit and scratch2
8588    // for computation.
8589    const Register index = scratch3;
8590    __ movl(index, Immediate(0));  // Index into strings.
8591    __ bind(&loop);
8592    // Compare characters.
8593    // TODO(lrn): Could we load more than one character at a time?
8594    __ movb(scratch2, FieldOperand(left,
8595                                   index,
8596                                   times_1,
8597                                   SeqAsciiString::kHeaderSize));
8598    // Increment the index and compensate with a -1 displacement on the
8599    // next load, giving the previous load extra time to complete.
8600    __ addl(index, Immediate(1));
8601    __ cmpb(scratch2, FieldOperand(right,
8602                                   index,
8603                                   times_1,
8604                                   SeqAsciiString::kHeaderSize - 1));
8605    __ j(not_equal, &result_not_equal);
8606    __ cmpl(index, min_length);
8607    __ j(not_equal, &loop);
8608  }
8609  // Completed loop without finding different characters.
8610  // Compare lengths (precomputed).
8611  __ bind(&compare_lengths);
8612  __ testl(length_difference, length_difference);
8613  __ j(not_zero, &result_not_equal);
8614
8615  // Result is EQUAL.
8616  __ Move(rax, Smi::FromInt(EQUAL));
8617  __ ret(2 * kPointerSize);
8618
8619  Label result_greater;
8620  __ bind(&result_not_equal);
8621  // Unequal comparison of left to right, either character or length.
8622  __ j(greater, &result_greater);
8623
8624  // Result is LESS.
8625  __ Move(rax, Smi::FromInt(LESS));
8626  __ ret(2 * kPointerSize);
8627
8628  // Result is GREATER.
8629  __ bind(&result_greater);
8630  __ Move(rax, Smi::FromInt(GREATER));
8631  __ ret(2 * kPointerSize);
8632}
8633
8634
8635void StringCompareStub::Generate(MacroAssembler* masm) {
8636  Label runtime;
8637
8638  // Stack frame on entry.
8639  //  rsp[0]: return address
8640  //  rsp[8]: right string
8641  //  rsp[16]: left string
8642
8643  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // left
8644  __ movq(rax, Operand(rsp, 1 * kPointerSize));  // right
8645
8646  // Check for identity.
8647  Label not_same;
8648  __ cmpq(rdx, rax);
8649  __ j(not_equal, &not_same);
8650  __ Move(rax, Smi::FromInt(EQUAL));
8651  __ IncrementCounter(&Counters::string_compare_native, 1);
8652  __ ret(2 * kPointerSize);
8653
8654  __ bind(&not_same);
8655
8656  // Check that both are sequential ASCII strings.
8657  __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
8658
8659  // Inline comparison of ascii strings.
8660  __ IncrementCounter(&Counters::string_compare_native, 1);
8661  GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
8662
8663  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
8664  // tagged as a small integer.
8665  __ bind(&runtime);
8666  __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
8667}
8668
8669#undef __
8670
8671#define __ masm.
8672
8673#ifdef _WIN64
8674typedef double (*ModuloFunction)(double, double);
8675// Define custom fmod implementation.
8676ModuloFunction CreateModuloFunction() {
8677  size_t actual_size;
8678  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
8679                                                 &actual_size,
8680                                                 true));
8681  CHECK(buffer);
8682  Assembler masm(buffer, static_cast<int>(actual_size));
8683  // Generated code is put into a fixed, unmovable buffer, and not into
8684  // the V8 heap. We can't, and don't, refer to any relocatable addresses
8685  // (e.g. the JavaScript nan-object).
8686
8687  // Windows 64 ABI passes double arguments in xmm0, xmm1 and
8688  // returns result in xmm0.
8689  // Argument backing space is allocated on the stack above
8690  // the return address.
8691
8692  // Compute x mod y.
8693  // Load y and x (use argument backing store as temporary storage).
8694  __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
8695  __ movsd(Operand(rsp, kPointerSize), xmm0);
8696  __ fld_d(Operand(rsp, kPointerSize * 2));
8697  __ fld_d(Operand(rsp, kPointerSize));
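  // x87 stack at this point (a sketch): st(0) = x, st(1) = y, so the fprem
  // loop below computes x mod y with the sign of x, matching C fmod.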
8698
8699  // Clear exception flags before operation.
8700  {
8701    Label no_exceptions;
8702    __ fwait();
8703    __ fnstsw_ax();
8704    // Clear if the Invalid Operation or Zero Divide exceptions are set.
8705    __ testb(rax, Immediate(5));
8706    __ j(zero, &no_exceptions);
8707    __ fnclex();
8708    __ bind(&no_exceptions);
8709  }
8710
8711  // Compute st(0) % st(1)
8712  {
8713    Label partial_remainder_loop;
8714    __ bind(&partial_remainder_loop);
8715    __ fprem();
8716    __ fwait();
8717    __ fnstsw_ax();
8718    __ testl(rax, Immediate(0x400 /* C2 */));
8719    // If C2 is set, computation only has partial result. Loop to
8720    // continue computation.
8721    __ j(not_zero, &partial_remainder_loop);
8722  }
8723
8724  Label valid_result;
8725  Label return_result;
8726  // If the Invalid Operation or Zero Divide exceptions are set,
8727  // return NaN.
8728  __ testb(rax, Immediate(5));
8729  __ j(zero, &valid_result);
8730  __ fstp(0);  // Drop result in st(0).
8731  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
8732  __ movq(rcx, kNaNValue, RelocInfo::NONE);
8733  __ movq(Operand(rsp, kPointerSize), rcx);
8734  __ movsd(xmm0, Operand(rsp, kPointerSize));
8735  __ jmp(&return_result);
8736
8737  // If result is valid, return that.
8738  __ bind(&valid_result);
8739  __ fstp_d(Operand(rsp, kPointerSize));
8740  __ movsd(xmm0, Operand(rsp, kPointerSize));
8741
8742  // Clean up the FPU stack and exception flags, and return xmm0.
8743  __ bind(&return_result);
8744  __ fstp(0);  // Unload y.
8745
8746  Label clear_exceptions;
8747  __ testb(rax, Immediate(0x3f /* any exception */));
8748  __ j(not_zero, &clear_exceptions);
8749  __ ret(0);
8750  __ bind(&clear_exceptions);
8751  __ fnclex();
8752  __ ret(0);
8753
8754  CodeDesc desc;
8755  masm.GetCode(&desc);
8756  // Call the function from C++.
8757  return FUNCTION_CAST<ModuloFunction>(buffer);
8758}
8759
8760#endif
8761
8762
8763#undef __
8764
8765} }  // namespace v8::internal
8766