// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X87

#include "src/crankshaft/x87/lithium-codegen-x87.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/deoptimizer.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/x87/frames-x87.h"

namespace v8 {
namespace internal {

// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
}


#ifdef _MSC_VER
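// Windows commits stack memory one page at a time, with a single guard page
// below the committed region. Touching each page of the newly reserved area
// in descending address order keeps the guard page moving down with us, so a
// large frame never skips past it and faults.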
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}
#endif


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  DCHECK(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
    MakeSureStackPagesMapped(slots * kPointerSize);
#endif
    if (FLAG_debug_code) {
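      // In debug builds, fill the reserved slots with the zap value so reads
      // of uninitialized slots are easy to spot. The pushed eax shifts esp
      // down one word, so esp + eax * 4 for eax = slots..1 walks exactly the
      // reserved area.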
      __ push(eax);
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
      __ pop(eax);
    }
  }

  // Initialize FPU state.
  __ fninit();

  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info_->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in edi.
    int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(edi);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(edi);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in eax.  It replaces the context passed to us.
    // It's saved in the stack and kept live in esi.
    __ mov(esi, eax);
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
                                    kDontSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // The interpreter is now the first-tier compiler. It runs code generated
  // by the TurboFan compiler, which always leaves "1" on the x87 FPU stack.
  // That would trip Crankshaft's x87 FPU stack depth check in debug mode,
  // so the FPU stack needs to be reset here.
  __ fninit();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ sub(esp, Immediate(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
  FlushX87StackIfNecessary(instr);
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  // When returning from a function call, the FPU must be initialized again.
  if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) {
    bool double_result = instr->HasDoubleRegisterResult();
    if (double_result) {
      __ lea(esp, Operand(esp, -kDoubleSize));
      __ fstp_d(Operand(esp, 0));
    }
    __ fninit();
    if (double_result) {
      __ fld_d(Operand(esp, 0));
      __ lea(esp, Operand(esp, kDoubleSize));
    }
  }
  if (instr->IsGoto()) {
    x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this);
  } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
             !instr->IsGap() && !instr->IsReturn()) {
    if (instr->ClobbersDoubleRegisters(isolate())) {
      if (instr->HasDoubleRegisterResult()) {
        DCHECK_EQ(1, x87_stack_.depth());
      } else {
        DCHECK_EQ(0, x87_stack_.depth());
      }
    }
    __ VerifyX87StackDepth(x87_stack_.depth());
  }
}


bool LCodeGen::GenerateJumpTable() {
  if (!jump_table_.length()) return !is_aborted();

  Label needs_frame;
  Comment(";;; -------------------- Jump table --------------------");

  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->deopt_info);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      __ call(&needs_frame);
    } else {
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  if (needs_frame.is_linked()) {
    __ bind(&needs_frame);
    /* stack layout
       3: entry address
       2: return address  <-- esp
       1: garbage
       0: garbage
    */
    __ push(MemOperand(esp, 0));                 // Copy return address.
    __ push(MemOperand(esp, 2 * kPointerSize));  // Copy entry address.

    /* stack layout
       3: entry address
       2: return address
       1: return address
       0: entry address  <-- esp
    */
    __ mov(MemOperand(esp, 3 * kPointerSize), ebp);  // Save ebp.
    // Fill ebp with the right stack frame address.
    __ lea(ebp, MemOperand(esp, 3 * kPointerSize));

    // This variant of deopt can only be used with stubs. Since we don't
    // have a function pointer to install in the stack frame that we're
    // building, install a special marker there instead.
    DCHECK(info()->IsStub());
    __ mov(MemOperand(esp, 2 * kPointerSize),
           Immediate(Smi::FromInt(StackFrame::STUB)));

    /* stack layout
       3: old ebp
       2: stub marker
       1: return address
       0: entry address  <-- esp
    */
    __ ret(0);  // Call the continuation without clobbering registers.
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      X87Stack copy(code->x87_stack());
      x87_stack_ = copy;

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int code) const {
  return Register::from_code(code);
}


X87Register LCodeGen::ToX87Register(int code) const {
  return X87Register::from_code(code);
}


void LCodeGen::X87LoadForUsage(X87Register reg) {
  DCHECK(x87_stack_.Contains(reg));
  x87_stack_.Fxch(reg);
  x87_stack_.pop();
}


void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
  DCHECK(x87_stack_.Contains(reg1));
  DCHECK(x87_stack_.Contains(reg2));
  if (reg1.is(reg2) && x87_stack_.depth() == 1) {
    __ fld(x87_stack_.st(reg1));
    x87_stack_.push(reg1);
    x87_stack_.pop();
    x87_stack_.pop();
  } else {
    x87_stack_.Fxch(reg1, 1);
    x87_stack_.Fxch(reg2);
    x87_stack_.pop();
    x87_stack_.pop();
  }
}


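// Encodes the virtual x87 stack as an integer: bits 0..2 hold the stack
// depth, and each occupied slot contributes a 3-bit register code, with
// ST(0) in bits 3..5, ST(1) in bits 6..8, and so on. For example, depth 2
// with stX3 at ST(0) and stX5 at ST(1) encodes as (5 << 6) | (3 << 3) | 2.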
int LCodeGen::X87Stack::GetLayout() {
  int layout = stack_depth_;
  for (int i = 0; i < stack_depth_; i++) {
    layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3));
  }

  return layout;
}


void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
  DCHECK(is_mutable_);
  DCHECK(Contains(reg) && stack_depth_ > other_slot);
  int i  = ArrayIndex(reg);
  int st = st2idx(i);
  if (st != other_slot) {
    int other_i = st2idx(other_slot);
    X87Register other = stack_[other_i];
    stack_[other_i]   = reg;
    stack_[i]         = other;
    if (st == 0) {
      __ fxch(other_slot);
    } else if (other_slot == 0) {
      __ fxch(st);
    } else {
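      // fxch can only exchange a slot with ST(0), so swapping two non-top
      // slots takes three exchanges: bring ST(st) to the top, swap it with
      // ST(other_slot), then put it back.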
      __ fxch(st);
      __ fxch(other_slot);
      __ fxch(st);
    }
  }
}


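// Maps an x87 stack position to an index into stack_: the virtual stack
// grows upward in the array, so ST(0) is stack_[stack_depth_ - 1] and
// ST(stack_depth_ - 1) is stack_[0].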
int LCodeGen::X87Stack::st2idx(int pos) {
  return stack_depth_ - pos - 1;
}


int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return i;
  }
  UNREACHABLE();
  return -1;
}


bool LCodeGen::X87Stack::Contains(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return true;
  }
  return false;
}


void LCodeGen::X87Stack::Free(X87Register reg) {
  DCHECK(is_mutable_);
  DCHECK(Contains(reg));
  int i  = ArrayIndex(reg);
  int st = st2idx(i);
  if (st > 0) {
    // Keep track of how fstp(i) changes the order of elements.
    int tos_i = st2idx(0);
    stack_[i] = stack_[tos_i];
  }
  pop();
  __ fstp(st);
}


void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
  } else {
    x87_stack_.push(dst);
  }
  X87Fld(src, opts);
}


void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
    x87_stack_.pop();
    // Push ST(i) onto the FPU register stack.
    __ fld(x87_stack_.st(src));
    x87_stack_.push(dst);
  } else {
    // Push ST(i) onto the FPU register stack.
    __ fld(x87_stack_.st(src));
    x87_stack_.push(dst);
  }
}


void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
  DCHECK(!src.is_reg_only());
  switch (opts) {
    case kX87DoubleOperand:
      __ fld_d(src);
      break;
    case kX87FloatOperand:
      __ fld_s(src);
      break;
    case kX87IntOperand:
      __ fild_s(src);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
  DCHECK(!dst.is_reg_only());
  x87_stack_.Fxch(src);
  switch (opts) {
    case kX87DoubleOperand:
      __ fst_d(dst);
      break;
    case kX87FloatOperand:
      __ fst_s(dst);
      break;
    case kX87IntOperand:
      __ fist_s(dst);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
  DCHECK(is_mutable_);
  if (Contains(reg)) {
    Free(reg);
  }
  // Mark this register as the next register to write to.
  stack_[stack_depth_] = reg;
}


void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
  DCHECK(is_mutable_);
  // Assert that reg is prepared to write, but not on the virtual stack yet.
  DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) &&
         stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
  stack_depth_++;
}


void LCodeGen::X87PrepareBinaryOp(
    X87Register left, X87Register right, X87Register result) {
  // You need to use DefineSameAsFirst for x87 instructions.
  DCHECK(result.is(left));
  x87_stack_.Fxch(right, 1);
  x87_stack_.Fxch(left);
}


void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
    bool double_inputs = instr->HasDoubleRegisterInput();

    // Flush the stack from tos down, since Free() will mess with tos.
    for (int i = stack_depth_-1; i >= 0; i--) {
      X87Register reg = stack_[i];
      // Skip registers which contain the inputs for the next instruction
      // when flushing the stack.
      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
        continue;
      }
      Free(reg);
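      // Free() moves the old top of the virtual stack into slot i, so bump i
      // to re-examine that slot on the next iteration (unless reg itself was
      // the top).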
      if (i < stack_depth_-1) i++;
    }
  }
  if (instr->IsReturn()) {
    while (stack_depth_ > 0) {
      __ fstp(0);
      stack_depth_--;
    }
    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
  }
}


void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr,
                                      LCodeGen* cgen) {
  // When going to a joined block, an explicit LClobberDoubles is inserted
  // before the LGoto, because all used x87 registers are spilled to stack
  // slots. The ResolvePhis phase of the register allocator guarantees that
  // the two inputs' x87 stacks have the same layout, so don't check
  // stack_depth_ <= 1 here.
  int goto_block_id = goto_instr->block_id();
  if (current_block_id + 1 != goto_block_id) {
    // If we have a value on the x87 stack on leaving a block, it must be a
    // phi input. If the next block we compile is not the join block, we have
    // to discard the stack state.
    // Before discarding the stack state, we need to save it if the "goto
    // block" has an unreachable last predecessor and
    // FLAG_unreachable_code_elimination is on.
    if (FLAG_unreachable_code_elimination) {
      int length = goto_instr->block()->predecessors()->length();
      bool has_unreachable_last_predecessor = false;
      for (int i = 0; i < length; i++) {
        HBasicBlock* block = goto_instr->block()->predecessors()->at(i);
        if (block->IsUnreachable() &&
            (block->block_id() + 1) == goto_block_id) {
          has_unreachable_last_predecessor = true;
        }
      }
      if (has_unreachable_last_predecessor) {
        if (cgen->x87_stack_map_.find(goto_block_id) ==
            cgen->x87_stack_map_.end()) {
          X87Stack* stack = new (cgen->zone()) X87Stack(*this);
          cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack));
        }
      }
    }

    // Discard the stack state.
    stack_depth_ = 0;
  }
}


void LCodeGen::EmitFlushX87ForDeopt() {
  // The deoptimizer does not support x87 registers. But as long as we only
  // deopt from a stub, it's not a problem, since we will re-materialize the
  // original stub inputs, which can't be double registers.
  // DCHECK(info()->IsStub());
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    __ pushfd();
    __ VerifyX87StackDepth(x87_stack_.depth());
    __ popfd();
  }

  // Flush X87 stack in the deoptimizer entry.
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


X87Register LCodeGen::ToX87Register(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToX87Register(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  if (r.IsExternal()) {
    return reinterpret_cast<int32_t>(
        constant->ExternalReferenceValue().address());
  }
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


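// Without an eager frame, incoming arguments are addressed relative to esp,
// which points at the return address: spill index -1 is the first argument,
// at esp + kPCOnStackSize, index -2 the next one word up, and so on.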
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, FrameSlotToFPOffset(op->index()));
  } else {
    // There is no eager stack frame, so retrieve the parameter relative to
    // the stack pointer.
    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize);
  } else {
    // There is no eager stack frame, so retrieve the parameter relative to
    // the stack pointer.
    return Operand(
        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    X87Register reg = ToX87Register(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(fun, argc, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  DCHECK(info()->is_calling());
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  DCHECK(info()->is_calling());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    DCHECK(frame_is_built_);
    // Put the x87 stack layout in TOS.
    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
    __ push(Immediate(x87_stack_.GetLayout()));
    __ fild_s(MemOperand(esp, 0));
    // Don't touch eflags.
    __ lea(esp, Operand(esp, kPointerSize));
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  // Put the x87 stack layout in TOS, so that we can save x87 fp registers in
  // the correct location.
  {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();

    int x87_stack_layout = x87_stack_.GetLayout();
    __ push(Immediate(x87_stack_layout));
    __ fild_s(MemOperand(esp, 0));
    // Don't touch eflags.
    __ lea(esp, Operand(esp, kPointerSize));
    __ bind(&done);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    DeoptComment(deopt_info);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  if (label->block()->predecessors()->length() > 1) {
    // A join block's x87 stack is that of its last visited predecessor.
    // If the last visited predecessor block is unreachable, the stack state
    // will be wrong. In that case, use the x87 stack of a reachable
    // predecessor.
    X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
    // Restore x87 stack.
    if (it != x87_stack_map_.end()) {
      x87_stack_ = *(it->second);
    }
  }
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
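  // mask is |divisor| - 1, e.g. 7 for divisor == 8 or -8: for a non-negative
  // dividend, x % 2^k is just x & mask, and negative dividends are negated
  // around the same 'and' below.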
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ test(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ neg(dividend);
    __ and_(dividend, mask);
    __ neg(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, mask);
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(eax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

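  // Compute dividend % |divisor| as dividend - |divisor| * (dividend /
  // |divisor|); TruncatingDiv leaves the truncated quotient in edx via a
  // magic-number multiplication, so no idiv is needed.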
  __ TruncatingDiv(dividend, Abs(divisor));
  __ imul(edx, edx, Abs(divisor));
  __ mov(eax, dividend);
  __ sub(eax, edx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmp(dividend, Immediate(0));
    DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(eax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(eax));
  DCHECK(!right_reg.is(edx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(edx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, Operand(right_reg));
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmp(left_reg, kMinInt);
    __ j(not_equal, &no_overflow_possible, Label::kNear);
    __ cmp(right_reg, -1);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Move(result_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ test(left_reg, Operand(left_reg));
    __ j(not_sign, &positive_left, Label::kNear);
    __ idiv(right_reg);
    __ test(result_reg, Operand(result_reg));
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idiv(right_reg);
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, kMinInt);
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ test(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // The arithmetic shift is always OK; the 'if' is only an optimization.
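    // For a negative dividend, 'sar' alone would round toward -infinity, so
    // bias it by 2^shift - 1 first: (dividend >> 31) is 0 or -1, and keeping
    // its low 'shift' bits via 'shr' yields 0 or 2^shift - 1.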
    if (shift > 1) __ sar(result, 31);
    __ shr(result, 32 - shift);
    __ add(result, dividend);
    __ sar(result, shift);
  }
  if (divisor < 0) __ neg(result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(eax, edx);
    __ imul(eax, eax, divisor);
    __ sub(eax, dividend);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(ToRegister(instr->result()).is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ test(remainder, remainder);
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sar(dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ neg(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sar(dividend, shift);
    return;
  }

  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ mov(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sar(dividend, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ neg(edx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
  Label needs_adjustment, done;
  __ cmp(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ dec(edx);
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(result.is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

  Label done;
  __ test(remainder, remainder);
  __ j(zero, &done, Label::kNear);
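  // idiv truncates toward zero; flooring needs one less when the remainder is
  // non-zero and dividend and divisor have opposite signs. In that case
  // (remainder ^ divisor) >> 31 is -1, otherwise 0.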
  __ xor_(remainder, divisor);
  __ sar(remainder, 31);
  __ add(result, remainder);
  __ bind(&done);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    // Try strength reductions on the multiplication.
    // All replacement instructions are at most as long as the imul
    // and have better latency.
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // If we know that the multiplication can't overflow, it's safe to
      // use instructions that don't set the overflow flag for the
      // multiplication.
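      // For example, x * 3 becomes lea left, [left + left * 2], x * 9 becomes
      // lea left, [left + left * 8], and x * 16 a single shl.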
1564      switch (constant) {
1565        case 1:
1566          // Do nothing.
1567          break;
1568        case 3:
1569          __ lea(left, Operand(left, left, times_2, 0));
1570          break;
1571        case 4:
1572          __ shl(left, 2);
1573          break;
1574        case 5:
1575          __ lea(left, Operand(left, left, times_4, 0));
1576          break;
1577        case 8:
1578          __ shl(left, 3);
1579          break;
1580        case 9:
1581          __ lea(left, Operand(left, left, times_8, 0));
1582          break;
1583        case 16:
1584          __ shl(left, 4);
1585          break;
1586        default:
1587          __ imul(left, left, constant);
1588          break;
1589      }
1590    } else {
1591      __ imul(left, left, constant);
1592    }
1593  } else {
1594    if (instr->hydrogen()->representation().IsSmi()) {
1595      __ SmiUntag(left);
1596    }
1597    __ imul(left, ToOperand(right));
1598  }
1599
1600  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1601    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1602  }
1603
1604  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1605    // Bail out if the result is supposed to be negative zero.
1606    Label done;
1607    __ test(left, Operand(left));
1608    __ j(not_zero, &done);
1609    if (right->IsConstantOperand()) {
1610      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1611        DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
1612      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1613        __ cmp(ToRegister(instr->temp()), Immediate(0));
1614        DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
1615      }
1616    } else {
1617      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ not_(ToRegister(left));
        } else {
          __ xor_(ToRegister(left), right_operand);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());
  if (right->IsRegister()) {
    DCHECK(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::ROR:
        __ ror_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
        } else {
          __ ror(ToRegister(left), shift_count);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ shr(ToRegister(left), shift_count);
        } else if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
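            // A multi-bit shl does not set a usable overflow flag, so
            // shift all but the last bit, then let SmiTag (implemented as
            // an add on ia32) supply the final doubling and check its
            // overflow flag.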
            if (shift_count != 1) {
              __ shl(ToRegister(left), shift_count - 1);
            }
            __ SmiTag(ToRegister(left));
            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
          } else {
            __ shl(ToRegister(left), shift_count);
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left),
           ToImmediate(right, instr->hydrogen()->representation()));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  uint64_t const bits = instr->bits();
  uint32_t const lower = static_cast<uint32_t>(bits);
  uint32_t const upper = static_cast<uint32_t>(bits >> 32);
  DCHECK(instr->result()->IsDoubleRegister());

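  // Push the upper word first so the little-endian 64-bit value ends up
  // contiguous at [esp], then load it onto the FPU stack from there.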
  __ push(Immediate(upper));
  __ push(Immediate(lower));
  X87Register reg = ToX87Register(instr->result());
  X87Mov(reg, Operand(esp, 0));
  __ add(Operand(esp), Immediate(kDoubleSize));
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Register reg = ToRegister(instr->result());
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(reg, object);
}


Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToRepresentation(LConstantOperand::cast(index),
                                  Representation::Integer32());
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    __ push(string);
    __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                             ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ pop(string);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzx_b(result, operand);
  } else {
    __ movzx_w(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToRepresentation(LConstantOperand::cast(instr->value()),
                                 Representation::Integer32());
    DCHECK_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      DCHECK_LE(value, String::kMaxOneByteCharCode);
      __ mov_b(operand, static_cast<int8_t>(value));
    } else {
      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
      __ mov_w(operand, static_cast<int16_t>(value));
    }
  } else {
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ mov_b(operand, value);
    } else {
      __ mov_w(operand, value);
    }
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      int32_t offset = ToRepresentation(LConstantOperand::cast(right),
                                        instr->hydrogen()->representation());
      __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      __ lea(ToRegister(instr->result()), address);
    }
  } else {
    if (right->IsConstantOperand()) {
      __ add(ToOperand(left),
             ToImmediate(right, instr->hydrogen()->representation()));
    } else {
      __ add(ToRegister(left), ToOperand(right));
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    }
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    if (right->IsConstantOperand()) {
      Operand left_op = ToOperand(left);
      Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
                                        instr->hydrogen()->representation());
      __ cmp(left_op, immediate);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_op, immediate);
    } else {
      Register left_reg = ToRegister(left);
      Operand right_op = ToOperand(right);
      __ cmp(left_reg, right_op);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    X87Register left_reg = ToX87Register(left);
    X87Register right_reg = ToX87Register(right);

    X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result()));
    __ fld(1);
    __ fld(1);
    __ FCmp();
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);            // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    __ fld(0);
    __ fldz();
    __ FCmp();
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either +0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      // Store st0 and st1 to the stack, OR their bit patterns, and load
      // the result back into left.
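      // OR-ing the single-precision bit patterns keeps the sign bit if
      // either operand is -0, which is exactly min(+/-0, +/-0).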
      Register scratch_reg = ToRegister(instr->temp());
      __ fld(1);
      __ fld(1);
      __ sub(esp, Immediate(2 * kPointerSize));
      __ fstp_s(MemOperand(esp, 0));
      __ fstp_s(MemOperand(esp, kPointerSize));
      __ pop(scratch_reg);
      __ or_(MemOperand(esp, 0), scratch_reg);
      X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
      __ pop(scratch_reg);  // Restore esp.
    } else {
      // Since both operands are +0 and/or -0, adding them has the same
      // effect as AND-ing their sign bits: the sum is -0 only if both
      // inputs are -0, which is exactly max(+/-0, +/-0).
      X87Fxch(left_reg);
      __ fadd(1);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ fld(0);
    __ fld(0);
    __ FCmp();                                      // NaN check.
    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.

    __ bind(&return_right);
    X87Fxch(left_reg);
    X87Mov(left_reg, right_reg);

    __ bind(&return_left);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  X87Register left = ToX87Register(instr->left());
  X87Register right = ToX87Register(instr->right());
  X87Register result = ToX87Register(instr->result());
  if (instr->op() != Token::MOD) {
    X87PrepareBinaryOp(left, right, result);
  }
  // Set the precision control to double-precision.
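  // (0x027F: all exceptions masked, round-to-nearest, 53-bit mantissa;
  // the x87 default of 0x037F computes with 64-bit extended precision,
  // which would not match IEEE double results.)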
  __ X87SetFPUCW(0x027F);
  switch (instr->op()) {
    case Token::ADD:
      __ fadd_i(1);
      break;
    case Token::SUB:
      __ fsub_i(1);
      break;
    case Token::MUL:
      __ fmul_i(1);
      break;
    case Token::DIV:
      __ fdiv_i(1);
      break;
    case Token::MOD: {
      // Pass two doubles as arguments on the stack.
      __ PrepareCallCFunction(4, eax);
      X87Mov(Operand(esp, 1 * kDoubleSize), right);
      X87Mov(Operand(esp, 0), left);
      X87Free(right);
      DCHECK(left.is(result));
      X87PrepareToWrite(result);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          4);

      // Return value is in st(0) on ia32.
      X87CommitWrite(result);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }

  // Restore the default value of the control word.
  __ X87SetFPUCW(0x037F);
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));
  DCHECK(ToRegister(instr->result()).is(eax));

  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

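  // Emit at most one jump: when one successor is the next block to be
  // emitted, branch only on the opposite edge and fall through.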
  if (right_block == left_block || cc == no_condition) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}


template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
  int true_block = instr->TrueDestination(chunk_);
  if (cc == no_condition) {
    __ jmp(chunk_->GetAssemblyLabel(true_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(true_block));
  }
}


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  if (cc == no_condition) {
    __ jmp(chunk_->GetAssemblyLabel(false_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(false_block));
  }
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsSmiOrInteger32()) {
    Register reg = ToRegister(instr->value());
    __ test(reg, Operand(reg));
    EmitBranch(instr, not_zero);
  } else if (r.IsDouble()) {
    X87Register reg = ToX87Register(instr->value());
    X87LoadForUsage(reg);
    __ fldz();
    __ FCmp();
    EmitBranch(instr, not_zero);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ cmp(reg, factory()->true_value());
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      __ test(reg, Operand(reg));
      EmitBranch(instr, not_equal);
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, no_condition);
    } else if (type.IsHeapNumber()) {
      UNREACHABLE();
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
      EmitBranch(instr, not_equal);
    } else {
      ToBooleanICStub::Types expected =
          instr->hydrogen()->expected_input_types();
      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();

      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
        // undefined -> false.
        __ cmp(reg, factory()->undefined_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
        // true -> true.
        __ cmp(reg, factory()->true_value());
        __ j(equal, instr->TrueLabel(chunk_));
        // false -> false.
        __ cmp(reg, factory()->false_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
        // 'null' -> false.
        __ cmp(reg, factory()->null_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanICStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ test(reg, Operand(reg));
        __ j(equal, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ test(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
      }

      Register map = no_reg;  // Keep the compiler happy.
      if (expected.NeedsMap()) {
        map = ToRegister(instr->temp());
        DCHECK(!map.is(reg));
        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                    Immediate(1 << Map::kIsUndetectable));
          __ j(not_zero, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
        __ j(above_equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanICStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, instr->TrueLabel(chunk_));
        __ jmp(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
        // Symbol value -> true.
        __ CmpInstanceType(map, SYMBOL_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
        // SIMD value -> true.
        __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
               factory()->heap_number_map());
        __ j(not_equal, &not_heap_number, Label::kNear);
        __ fldz();
        __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
        __ FCmp();
        __ j(zero, instr->FalseLabel(chunk_));
        __ jmp(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = not_equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->is_double() ||
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cc = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
                         ? instr->TrueDestination(chunk_)
                         : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      X87LoadForUsage(ToX87Register(right), ToX87Register(left));
      __ FCmp();
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ j(parity_even, instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        __ cmp(ToOperand(left),
               ToImmediate(right, instr->hydrogen()->representation()));
      } else if (left->IsConstantOperand()) {
        __ cmp(ToOperand(right),
               ToImmediate(left, instr->hydrogen()->representation()));
        // We commuted the operands, so commute the condition.
        cc = CommuteCondition(cc);
      } else {
        __ cmp(ToRegister(left), ToOperand(right));
      }
    }
    EmitBranch(instr, cc);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());

  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ CmpObject(left, right);
  } else {
    Operand right = ToOperand(instr->right());
    __ cmp(left, right);
  }
  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);
    return;
  }

  // Put the value on top of the FPU stack.
  X87Register src = ToX87Register(instr->object());
  X87LoadForUsage(src);
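  // The hole is encoded as a NaN with a distinctive upper word, and only
  // a NaN compares unordered with itself, so check for NaN first.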
  __ fld(0);
  __ fld(0);
  __ FCmp();
  Label ok;
  __ j(parity_even, &ok, Label::kNear);
  __ fstp(0);
  EmitFalseBranch(instr, no_condition);
  __ bind(&ok);

  __ sub(esp, Immediate(kDoubleSize));
  __ fstp_d(MemOperand(esp, 0));

  __ add(esp, Immediate(kDoubleSize));
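  // The double was popped, but its bytes are still intact below esp;
  // compare its upper word against the hole sentinel.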
  int offset = sizeof(kHoleNanUpper32);
  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
  EmitBranch(instr, equal);
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->value());

  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(instr, zero);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    STATIC_ASSERT(kSmiTag == 0);
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsUndetectable));
  EmitBranch(instr, not_zero);
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
    case Token::LT:
      return less;
    case Token::GT:
      return greater;
    case Token::LTE:
      return less_equal;
    case Token::GTE:
      return greater_equal;
    default:
      UNREACHABLE();
      return no_condition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));

  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  __ CompareRoot(eax, Heap::kTrueValueRootIndex);
  EmitBranch(instr, equal);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, equal);
}


// Branches to a label or falls through with the answer in the z flag.  Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));
  __ JumpIfSmi(input, is_false);

  __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    __ j(above_equal, is_true);
  } else {
    __ j(above_equal, is_false);
  }

  // The instance type is now known to be below FIRST_FUNCTION_TYPE, so
  // the object is not a function.
  // Check if the constructor in the map is a function.
  __ GetMapConstructor(temp, temp, temp2);
  // Objects with a non-function constructor have class 'Object'.
  __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp, FieldOperand(temp,
                            SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
      class_name, input, temp, temp2);

  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(instr, equal);
}


void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = ToRegister(instr->scratch());
  Register const object_prototype = object_map;
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object.  It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    __ test(object, Immediate(kSmiTagMask));
    EmitFalseBranch(instr, zero);
  }

  // Loop through the {object}'s prototype chain looking for the {prototype}.
  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ bind(&loop);

  // Deoptimize if the object needs to be access checked.
  __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
  DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
  // Deoptimize for proxies.
  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
  DeoptimizeIf(equal, instr, Deoptimizer::kProxy);

  __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
  __ cmp(object_prototype, factory()->null_value());
  EmitFalseBranch(instr, equal);
  __ cmp(object_prototype, prototype);
  EmitTrueBranch(instr, equal);
  __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
  __ jmp(&loop);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(condition, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}


void LCodeGen::EmitReturn(LReturn* instr) {
  int extra_value_count = 1;
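  // The one extra value to drop is the receiver, which sits on the stack
  // below the parameters.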

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;

    // Emit code to restore the stack based on instr->parameter_count().
    __ pop(return_addr_reg);  // Save the return address.
    __ shl(reg, kPointerSizeLog2);
    __ add(esp, reg);
    __ jmp(return_addr_reg);
  }
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Preserve the return value on the stack and rely on the runtime call
    // to return the value in the same register.  We're leaving the code
    // managed by the register allocator and tearing down the frame, so
    // it's safe to write to the context register.
    __ push(eax);
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit);
  }
  if (NeedsEagerFrame()) {
    __ mov(esp, ebp);
    __ pop(ebp);
  }

  EmitReturn(instr);
}


template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
  DCHECK(slot_register.is(eax));

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ mov(vector_register, vector);
  // No need to allocate this register.
  FeedbackVectorSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ mov(slot_register, Immediate(Smi::FromInt(index)));
}


template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = ToRegister(instr->temp_slot());

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ mov(vector_register, vector);
  FeedbackVectorSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ mov(slot_register, Immediate(Smi::FromInt(index)));
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->result()).is(eax));

  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  Handle<Code> ic =
      CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
          .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result, ContextOperand(context, instr->slot_index()));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ mov(result, factory()->undefined_value());
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Label skip_assignment;

  Operand target = ContextOperand(context, instr->slot_index());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(target, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
    } else {
      __ j(not_equal, &skip_assignment, Label::kNear);
    }
  }

  __ mov(target, value);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    Register temp = ToRegister(instr->temp());
    int offset = Context::SlotOffset(instr->slot_index());
    __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs,
                              EMIT_REMEMBERED_SET, check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(ToExternalReference(
                LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    __ Load(result, operand, access.representation());
    return;
  }

  Register object = ToRegister(instr->object());
  if (instr->hydrogen()->representation().IsDouble()) {
    X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  __ Load(result, FieldOperand(object, offset), access.representation());
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  DCHECK(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
    AllowDeferredHandleDereference smi_check;
    if (object->IsSmi()) {
      __ Push(Handle<Smi>::cast(object));
    } else {
      __ PushHeapObject(Handle<HeapObject>::cast(object));
    }
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    __ push(ToOperand(operand));
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(eax));

  __ mov(LoadDescriptor::NameRegister(), instr->name());
  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  // Get the prototype or initial map from the function.
  __ mov(result,
         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
  DeoptimizeIf(equal, instr, Deoptimizer::kHole);

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, temp);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
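    // Arguments live above the saved frame pointer and return address:
    // element i is at [arguments + (length - i + 1) * kPointerSize].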
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    int index = (const_length - const_index) + 1;
    __ mov(result, Operand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Operand index = ToOperand(instr->index());
    // There are two words between the frame pointer and the last argument.
    // Subtracting the index from the length accounts for one of them; add
    // one more.
    __ sub(length, index);
    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
  }
}


void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
  if (elements_kind == FLOAT32_ELEMENTS) {
    X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
  } else if (elements_kind == FLOAT64_ELEMENTS) {
    X87Mov(ToX87Register(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case INT8_ELEMENTS:
        __ movsx_b(result, operand);
        break;
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movzx_b(result, operand);
        break;
      case INT16_ELEMENTS:
        __ movsx_w(result, operand);
        break;
      case UINT16_ELEMENTS:
        __ movzx_w(result, operand);
        break;
      case INT32_ELEMENTS:
        __ mov(result, operand);
        break;
      case UINT32_ELEMENTS:
        __ mov(result, operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ test(result, Operand(result));
          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  if (instr->hydrogen()->RequiresHoleCheck()) {
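    // The hole is a NaN with a distinctive upper word, so it is enough to
    // check the upper 32 bits of the double against the sentinel.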
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(),
        FAST_DOUBLE_ELEMENTS,
        instr->base_offset() + sizeof(kHoleNanLower32));
    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr, Deoptimizer::kHole);
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());
  X87Mov(ToX87Register(instr->result()), double_load_operand);
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register result = ToRegister(instr->result());

  // Load the result.
  __ mov(result,
         BuildFastArrayOperand(instr->elements(), instr->key(),
                               instr->hydrogen()->key()->representation(),
                               FAST_ELEMENTS, instr->base_offset()));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ test(result, Immediate(kSmiTagMask));
      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
    } else {
      __ cmp(result, factory()->the_hole_value());
      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
    }
  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ cmp(result, factory()->the_hole_value());
    __ j(not_equal, &done);
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kArrayProtectorValid.
      // Otherwise it needs to bail out.
      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
      __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
             Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
      DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
    }
    __ mov(result, isolate()->factory()->undefined_value());
    __ bind(&done);
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_fixed_typed_array()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    Representation key_representation,
    ElementsKind elements_kind,
    uint32_t base_offset) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int element_shift_size = ElementsKindToShiftSize(elements_kind);
  int shift_size = element_shift_size;
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    return Operand(elements_pointer_reg,
                   ((constant_value) << shift_size)
                       + base_offset);
  } else {
    // Take the tag bit into account while computing the shift size.
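    // A smi key is already shifted left by one, so e.g. a FAST_ELEMENTS
    // access scales a smi key by times_2 rather than times_4.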
    if (key_representation.IsSmi() && (shift_size >= 1)) {
      shift_size -= kSmiTagSize;
    }
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   base_offset);
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));

  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);

  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lea(result, Operand(esp, -2 * kPointerSize));
  } else if (instr->hydrogen()->arguments_adaptor()) {
    // Check for an arguments adaptor frame.
    Label done, adapted;
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
    __ mov(result,
           Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
    __ cmp(Operand(result),
           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ mov(result, Operand(ebp));
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ bind(&done);
  } else {
    __ mov(result, Operand(ebp));
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ cmp(ebp, elem);
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label receiver_ok, global_object;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  Register scratch = ToRegister(instr->temp());

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ mov(scratch,
           FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
              Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);

    // Do not transform the receiver to object for builtins.
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
              Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ cmp(receiver, factory()->null_value());
  __ j(equal, &global_object, Label::kNear);
  __ cmp(receiver, factory()->undefined_value());
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  __ test(receiver, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
  __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
  DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);

  __ jmp(&receiver_ok, Label::kNear);
  __ bind(&global_object);
  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
  __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
  __ bind(&receiver_ok);
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  DCHECK(receiver.is(eax));  // Used for parameter count.
  DCHECK(function.is(edi));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(eax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
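  // The loop below pushes each argument individually, so cap the count;
  // anything above kArgumentsLimit deoptimizes.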
3165  const uint32_t kArgumentsLimit = 1 * KB;
3166  __ cmp(length, kArgumentsLimit);
3167  DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
3168
3169  __ push(receiver);
3170  __ mov(receiver, length);
3171
3172  // Loop through the arguments pushing them onto the execution
3173  // stack.
3174  Label invoke, loop;
3175  // length is a small non-negative integer, due to the test above.
3176  __ test(length, Operand(length));
3177  __ j(zero, &invoke, Label::kNear);
3178  __ bind(&loop);
3179  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3180  __ dec(length);
3181  __ j(not_zero, &loop);
3182
3183  // Invoke the function.
3184  __ bind(&invoke);
3185
3186  InvokeFlag flag = CALL_FUNCTION;
3187  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
3188    DCHECK(!info()->saves_caller_doubles());
3189    // TODO(ishell): drop current frame before pushing arguments to the stack.
3190    flag = JUMP_FUNCTION;
3191    ParameterCount actual(eax);
3192    // It is safe to use ebx, ecx and edx as scratch registers here given that
3193    // 1) we are not going to return to caller function anyway,
3194    // 2) ebx (expected arguments count) and edx (new.target) will be
3195    //    initialized below.
3196    PrepareForTailCall(actual, ebx, ecx, edx);
3197  }
3198
3199  DCHECK(instr->HasPointerMap());
3200  LPointerMap* pointers = instr->pointer_map();
3201  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3202  ParameterCount actual(eax);
3203  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
3204}
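// The push loop above is equivalent to this C-style sketch, with `elements`
// pointing one word below the first argument slot:
//   for (int i = length; i > 0; i--) push(elements[i]);
// i.e. elements[length] is pushed first and elements[1] last.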


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in esi.
    DCHECK(result.is(esi));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  __ push(Immediate(instr->hydrogen()->pairs()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
  CallRuntime(Runtime::kDeclareGlobals, instr);
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 bool is_tail_call, LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  Register function_reg = edi;

  if (can_invoke_directly) {
    // Change context.
    __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize new target and number of actual arguments.
    __ mov(edx, factory()->undefined_value());
    __ mov(eax, arity);

    bool is_self_call = function.is_identical_to(info()->closure());

    // Invoke function directly.
    if (is_self_call) {
      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
      if (is_tail_call) {
        __ Jump(self, RelocInfo::CODE_TARGET);
      } else {
        __ Call(self, RelocInfo::CODE_TARGET);
      }
    } else {
      Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
      if (is_tail_call) {
        __ jmp(target);
      } else {
        __ call(target);
      }
    }

    if (!is_tail_call) {
      // Set up deoptimization.
      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
    }
  } else {
    // We need to adapt arguments.
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(arity);
    ParameterCount expected(formal_parameter_count);
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(function_reg, expected, actual, flag, generator);
  }
}
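// Note (an assumption about the macro assembler's self-reference handling):
// for self-calls the target is the code object currently being generated,
// reached via CodeObject(); that handle's location is filled in once the
// Code object is actually allocated, so emitting it as a CODE_TARGET here
// is safe even though code generation has not finished yet.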


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(eax));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ leave();

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ jmp(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ jmp(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(Operand(target)));
      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ call(target);
    }
    generator.AfterCall();
  }
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);

  Label slow, allocated, done;
  uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
  available_regs &= ~input_reg.bit();
  if (instr->context()->IsRegister()) {
    // Make sure that the context isn't overwritten in the AllocateHeapNumber
    // macro below.
    available_regs &= ~ToRegister(instr->context()).bit();
  }

  Register tmp =
      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
  available_regs &= ~tmp.bit();
  Register tmp2 =
      Register::from_code(base::bits::CountTrailingZeros32(available_regs));

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done, Label::kNear);

  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
                          instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}
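// On the raw IEEE-754 bits, the allocated result is simply the input with
// the sign bit cleared, e.g. (a sketch of the word-level computation):
//   upper = exponent_word(input) & ~HeapNumber::kSignMask;  // drop sign
//   lower = mantissa_word(input);                           // unchanged
// so Math.abs(-3.5) only flips bit 31 of the upper word.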


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ test(input_reg, Operand(input_reg));
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ neg(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
  __ bind(&is_positive);
}
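// Two's-complement corner case: neg cannot represent the absolute value of
// kMinInt, since -(-2^31) wraps back to -2^31 and leaves the sign flag set.
// So e.g. Math.abs(-2147483648) deoptimizes and is redone as a double.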


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LMathAbs* instr,
                                    const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  DCHECK(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    X87Register value = ToX87Register(instr->value());
    X87Fxch(value);
    __ fabs();
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  Register output_reg = ToRegister(instr->result());
  X87Register input_reg = ToX87Register(instr->value());
  X87Fxch(input_reg);

  Label not_minus_zero, done;
  // Deoptimize on unordered.
  __ fldz();
  __ fld(1);
  __ FCmp();
  DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
  __ j(below, &not_minus_zero, Label::kNear);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Check for negative zero.
    __ j(not_equal, &not_minus_zero, Label::kNear);
    // +/- 0.0.
    __ fld(0);
    __ FXamSign();
    DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
    __ Move(output_reg, Immediate(0));
    __ jmp(&done, Label::kFar);
  }

  // Positive input.
  // rc=01B, round down.
  __ bind(&not_minus_zero);
  __ fnclex();
  __ X87SetRC(0x0400);
  __ sub(esp, Immediate(kPointerSize));
  __ fist_s(Operand(esp, 0));
  __ pop(output_reg);
  __ X87SetRC(0x0000);
  __ X87CheckIA();
  DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
  __ fnclex();
  __ X87SetRC(0x0000);
  __ bind(&done);
}
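// x87 rounding-control values used here and below (bits 10-11 of the FPU
// control word), for reference:
//   0x0000 round to nearest even   0x0400 round down (toward -inf)
//   0x0800 round up (toward +inf)  0x0C00 round toward zero (truncate)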


void LCodeGen::DoMathRound(LMathRound* instr) {
  X87Register input_reg = ToX87Register(instr->value());
  Register result = ToRegister(instr->result());
  X87Fxch(input_reg);
  Label below_one_half, below_minus_one_half, done;

  ExternalReference one_half = ExternalReference::address_of_one_half();
  ExternalReference minus_one_half =
      ExternalReference::address_of_minus_one_half();

  __ fld_d(Operand::StaticVariable(one_half));
  __ fld(1);
  __ FCmp();
  __ j(carry, &below_one_half);

  // Round toward zero: since x >= 0.5 here, truncating (x + 0.5)
  // computes floor(x + 0.5).
  __ fld(0);
  __ fadd_d(Operand::StaticVariable(one_half));
  // rc=11B, round toward zero.
  __ X87SetRC(0x0c00);
  __ sub(esp, Immediate(kPointerSize));
  // Clear exception bits.
  __ fnclex();
  __ fistp_s(MemOperand(esp, 0));
  // Restore round mode.
  __ X87SetRC(0x0000);
  // Check overflow.
  __ X87CheckIA();
  __ pop(result);
  DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
  __ fnclex();
  // Restore round mode.
  __ X87SetRC(0x0000);
  __ jmp(&done);

  __ bind(&below_one_half);
  __ fld_d(Operand::StaticVariable(minus_one_half));
  __ fld(1);
  __ FCmp();
  __ j(carry, &below_minus_one_half);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Deoptimize on a negative sign, since the result would be -0;
    // otherwise fall through and return +0.
    __ fld(0);
    __ FXamSign();
    DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
  }
  __ Move(result, Immediate(0));
  __ jmp(&done);

  __ bind(&below_minus_one_half);
  __ fld(0);
  __ fadd_d(Operand::StaticVariable(one_half));
  // rc=01B, round down.
  __ X87SetRC(0x0400);
  __ sub(esp, Immediate(kPointerSize));
  // Clear exception bits.
  __ fnclex();
  __ fistp_s(MemOperand(esp, 0));
  // Restore round mode.
  __ X87SetRC(0x0000);
  // Check overflow.
  __ X87CheckIA();
  __ pop(result);
  DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
  __ fnclex();
  // Restore round mode.
  __ X87SetRC(0x0000);

  __ bind(&done);
}
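// Summary of the three ranges above (a sketch of the strategy):
//   x >= 0.5         : result = trunc(x + 0.5)   // rc=11B
//   -0.5 <= x < 0.5  : result = 0, deopt on -0 when minus zero matters
//   x < -0.5         : result = floor(x + 0.5)   // rc=01B
// e.g. Math.round(2.5) == 3 while Math.round(-2.5) == -2.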


void LCodeGen::DoMathFround(LMathFround* instr) {
  X87Register input_reg = ToX87Register(instr->value());
  X87Fxch(input_reg);
  __ sub(esp, Immediate(kPointerSize));
  __ fstp_s(MemOperand(esp, 0));
  X87Fld(MemOperand(esp, 0), kX87FloatOperand);
  __ add(esp, Immediate(kPointerSize));
}
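// The float32 rounding happens in the store/load round trip, roughly:
//   float f = static_cast<float>(x);  // fstp_s rounds to single precision
//   double result = f;                // reloaded and widened back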


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  X87Register input_reg = ToX87Register(instr->value());
  __ X87SetFPUCW(0x027F);
  X87Fxch(input_reg);
  __ fsqrt();
  __ X87SetFPUCW(0x037F);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  X87Register input_reg = ToX87Register(instr->value());
  DCHECK(ToX87Register(instr->result()).is(input_reg));
  X87Fxch(input_reg);
  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1
  __ fxam();
  __ push(eax);
  __ fnstsw_ax();
  __ and_(eax, Immediate(0x4700));
  __ cmp(eax, Immediate(0x0700));
  __ j(not_equal, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  __ fchs();
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ fldz();
  __ faddp();  // Convert -0 to +0.
  __ fsqrt();
  __ bind(&done);
  __ pop(eax);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  X87Register result = ToX87Register(instr->result());
  // Having marked this as a call, we can use any registers.
  X87Register base = ToX87Register(instr->left());
  ExternalReference one_half = ExternalReference::address_of_one_half();

  if (exponent_type.IsSmi()) {
    Register exponent = ToRegister(instr->right());
    X87LoadForUsage(base);
    __ SmiUntag(exponent);
    __ push(exponent);
    __ fild_s(MemOperand(esp, 0));
    __ pop(exponent);
  } else if (exponent_type.IsTagged()) {
    Register exponent = ToRegister(instr->right());
    Register temp = exponent.is(ecx) ? eax : ecx;
    Label no_deopt, done;
    X87LoadForUsage(base);
    __ JumpIfSmi(exponent, &no_deopt);
    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    // Heap number (double).
    __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
    __ jmp(&done);
    // Smi.
    __ bind(&no_deopt);
    __ SmiUntag(exponent);
    __ push(exponent);
    __ fild_s(MemOperand(esp, 0));
    __ pop(exponent);
    __ bind(&done);
  } else if (exponent_type.IsInteger32()) {
    Register exponent = ToRegister(instr->right());
    X87LoadForUsage(base);
    __ push(exponent);
    __ fild_s(MemOperand(esp, 0));
    __ pop(exponent);
  } else {
    DCHECK(exponent_type.IsDouble());
    X87Register exponent_double = ToX87Register(instr->right());
    X87LoadForUsage(base, exponent_double);
  }

  // FP data stack {base, exponent(TOS)}.
  // Handle (exponent == +/-0.5 && base == -0).
  Label not_plus_0;
  __ fld(0);
  __ fabs();
  X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
  __ FCmp();
  __ j(parity_even, &not_plus_0, Label::kNear);  // NaN.
  __ j(not_equal, &not_plus_0, Label::kNear);
  __ fldz();
  // FP data stack {base, exponent(TOS), zero}.
  __ faddp(2);
  __ bind(&not_plus_0);

  {
    __ PrepareCallCFunction(4, eax);
    __ fstp_d(MemOperand(esp, kDoubleSize));  // Exponent value.
    __ fstp_d(MemOperand(esp, 0));            // Base value.
    X87PrepareToWrite(result);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     4);
    // Return value is in st(0) on ia32.
    X87CommitWrite(result);
  }
}
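// The fixup above relies on -0.0 + 0.0 == +0.0: when |exponent| == 0.5 a
// -0 base is normalized to +0 before calling out to the C function, so
// e.g. Math.pow(-0, 0.5) yields +0, matching the spec's Math.pow semantics.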


void LCodeGen::DoMathLog(LMathLog* instr) {
  DCHECK(instr->value()->Equals(instr->result()));
  X87Register result = ToX87Register(instr->result());
  X87Register input_reg = ToX87Register(instr->value());
  X87Fxch(input_reg);

  // Pass one double as argument on the stack.
  __ PrepareCallCFunction(2, eax);
  __ fstp_d(MemOperand(esp, 0));
  X87PrepareToWrite(result);
  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2);
  // Return value is in st(0) on ia32.
  X87CommitWrite(result);
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ Lzcnt(result, input);
}
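// The Lzcnt macro emits the lzcnt instruction when the CPU supports it and
// otherwise falls back to bsr plus a fixup (an assumption about the macro
// assembler); either way Math.clz32(1) == 31 and Math.clz32(0) == 32.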

void LCodeGen::DoMathCos(LMathCos* instr) {
  X87Register result = ToX87Register(instr->result());
  X87Register input_reg = ToX87Register(instr->value());
  __ fld(x87_stack_.st(input_reg));

  // Pass one double as argument on the stack.
  __ PrepareCallCFunction(2, eax);
  __ fstp_d(MemOperand(esp, 0));
  X87PrepareToWrite(result);
  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
  // Return value is in st(0) on ia32.
  X87CommitWrite(result);
}

void LCodeGen::DoMathSin(LMathSin* instr) {
  X87Register result = ToX87Register(instr->result());
  X87Register input_reg = ToX87Register(instr->value());
  __ fld(x87_stack_.st(input_reg));

  // Pass one double as argument on the stack.
  __ PrepareCallCFunction(2, eax);
  __ fstp_d(MemOperand(esp, 0));
  X87PrepareToWrite(result);
  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
  // Return value is in st(0) on ia32.
  X87CommitWrite(result);
}

void LCodeGen::DoMathExp(LMathExp* instr) {
  X87Register result = ToX87Register(instr->result());
  X87Register input_reg = ToX87Register(instr->value());
  __ fld(x87_stack_.st(input_reg));

  // Pass one double as argument on the stack.
  __ PrepareCallCFunction(2, eax);
  __ fstp_d(MemOperand(esp, 0));
  X87PrepareToWrite(result);
  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2);
  // Return value is in st(0) on ia32.
  X87CommitWrite(result);
}

void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
                                  Register scratch1, Register scratch2,
                                  Register scratch3) {
#if DEBUG
  if (actual.is_reg()) {
    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
  } else {
    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
  }
#endif
  if (FLAG_code_comments) {
    if (actual.is_reg()) {
      Comment(";;; PrepareForTailCall, actual: %s {",
              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
                  actual.reg().code()));
    } else {
      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
    }
  }

  // Check if next frame is an arguments adaptor frame.
  Register caller_args_count_reg = scratch1;
  Label no_arguments_adaptor, formal_parameter_count_loaded;
  __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &no_arguments_adaptor, Label::kNear);

  // Drop current frame and load arguments count from arguments adaptor frame.
  __ mov(ebp, scratch2);
  __ mov(caller_args_count_reg,
         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);
  __ jmp(&formal_parameter_count_loaded, Label::kNear);

  __ bind(&no_arguments_adaptor);
  // Load caller's formal parameter count.
  __ mov(caller_args_count_reg,
         Immediate(info()->literal()->parameter_count()));

  __ bind(&formal_parameter_count_loaded);
  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
                        ReturnAddressState::kNotOnStack, 0);
  Comment(";;; }");
}
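// Stack sketch (a simplification, relying on the macro's documented
// behavior): if the caller itself sits on top of an arguments adaptor
// frame, that frame is dropped together with the current one, and the new
// arguments are slid down over the caller's argument area, so the tail
// call consumes no additional stack space.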

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  HInvokeFunction* hinstr = instr->hydrogen();
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->function()).is(edi));
  DCHECK(instr->HasPointerMap());

  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;

  if (is_tail_call) {
    DCHECK(!info()->saves_caller_doubles());
    ParameterCount actual(instr->arity());
    // It is safe to use ebx, ecx and edx as scratch registers here given that
    // 1) we are not going to return to caller function anyway,
    // 2) ebx (expected arguments count) and edx (new.target) will be
    //    initialized below.
    PrepareForTailCall(actual, ebx, ecx, edx);
  }

  Handle<JSFunction> known_function = hinstr->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(instr->arity());
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(edi, no_reg, actual, flag, generator);
  } else {
    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
                      instr->arity(), is_tail_call, instr);
  }
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->constructor()).is(edi));
  DCHECK(ToRegister(instr->result()).is(eax));

  __ Move(eax, Immediate(instr->arity()));
  __ mov(ebx, instr->hydrogen()->site());

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // The single argument is the new array's length: a non-zero length
      // creates holes, so check it before picking the stub.
      __ mov(ecx, Operand(esp, 0));
      __ test(ecx, ecx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ lea(result, Operand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ lea(result, Operand(base, offset, times_1, 0));
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->hydrogen()->field_representation();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(
            ToExternalReference(LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      __ mov(operand, Immediate(ToInteger32(operand_value)));
    } else {
      Register value = ToRegister(instr->value());
      __ Store(value, operand, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  __ AssertNotSmi(object);
  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    X87Register value = ToX87Register(instr->value());
    X87Mov(FieldOperand(object, offset), value);
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      Register temp_map = ToRegister(instr->temp_map());
      __ mov(temp_map, transition);
      __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
    }
  }

  // Do the store.
  Register write_register = object;
  if (!access.IsInobject()) {
    write_register = ToRegister(instr->temp());
    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  MemOperand operand = FieldOperand(write_register, offset);
  if (instr->value()->IsConstantOperand()) {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (operand_value->IsRegister()) {
      Register value = ToRegister(operand_value);
      __ Store(value, operand, representation);
    } else if (representation.IsInteger32() || representation.IsExternal()) {
      Immediate immediate = ToImmediate(operand_value, representation);
      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, immediate);
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, handle_value);
    }
  } else {
    Register value = ToRegister(instr->value());
    __ Store(value, operand, representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);

  __ mov(StoreDescriptor::NameRegister(), instr->name());
  Handle<Code> ic =
      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
          .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
  if (instr->index()->IsConstantOperand()) {
    __ cmp(ToOperand(instr->length()),
           ToImmediate(LConstantOperand::cast(instr->index()),
                       instr->hydrogen()->length()->representation()));
    cc = CommuteCondition(cc);
  } else if (instr->length()->IsConstantOperand()) {
    __ cmp(ToOperand(instr->index()),
           ToImmediate(LConstantOperand::cast(instr->length()),
                       instr->hydrogen()->index()->representation()));
  } else {
    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}
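// When the index is a constant, the comparison is emitted the other way
// around (length vs. index), so the condition must be commuted: deopting
// on `index >= length` (above_equal) becomes deopting on
// `length <= index` (below_equal).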


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
  if (elements_kind == FLOAT32_ELEMENTS) {
    X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
  } else if (elements_kind == FLOAT64_ELEMENTS) {
    uint64_t int_val = kHoleNanInt64;
    int32_t lower = static_cast<int32_t>(int_val);
    int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
    Operand operand2 = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(), elements_kind,
        instr->base_offset() + kPointerSize);

    Label no_special_nan_handling, done;
    X87Register value = ToX87Register(instr->value());
    X87Fxch(value);
    __ lea(esp, Operand(esp, -kDoubleSize));
    __ fst_d(MemOperand(esp, 0));
    __ lea(esp, Operand(esp, kDoubleSize));
    int offset = sizeof(kHoleNanUpper32);
    __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
    __ j(not_equal, &no_special_nan_handling, Label::kNear);
    __ mov(operand, Immediate(lower));
    __ mov(operand2, Immediate(upper));
    __ jmp(&done, Label::kNear);

    __ bind(&no_special_nan_handling);
    __ fst_d(operand);
    __ bind(&done);
  } else {
    Register value = ToRegister(instr->value());
    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case INT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ mov_b(operand, value);
        break;
      case UINT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ mov_w(operand, value);
        break;
      case UINT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ mov(operand, value);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());

  uint64_t int_val = kHoleNanInt64;
  int32_t lower = static_cast<int32_t>(int_val);
  int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
  Operand double_store_operand2 = BuildFastArrayOperand(
      instr->elements(), instr->key(),
      instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS,
      instr->base_offset() + kPointerSize);

  if (instr->hydrogen()->IsConstantHoleStore()) {
    // This means we should store the (double) hole. No floating point
    // registers required.
    __ mov(double_store_operand, Immediate(lower));
    __ mov(double_store_operand2, Immediate(upper));
  } else {
    Label no_special_nan_handling, done;
    X87Register value = ToX87Register(instr->value());
    X87Fxch(value);

    if (instr->NeedsCanonicalization()) {
      __ fld(0);
      __ fld(0);
      __ FCmp();
      __ j(parity_odd, &no_special_nan_handling, Label::kNear);
      // All NaNs are canonicalized to 0x7fffffffffffffff.
      __ mov(double_store_operand, Immediate(0xffffffff));
      __ mov(double_store_operand2, Immediate(0x7fffffff));
      __ jmp(&done, Label::kNear);
    } else {
      __ lea(esp, Operand(esp, -kDoubleSize));
      __ fst_d(MemOperand(esp, 0));
      __ lea(esp, Operand(esp, kDoubleSize));
      int offset = sizeof(kHoleNanUpper32);
      __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
      __ j(not_equal, &no_special_nan_handling, Label::kNear);
      __ mov(double_store_operand, Immediate(lower));
      __ mov(double_store_operand2, Immediate(upper));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_special_nan_handling);
    __ fst_d(double_store_operand);
    __ bind(&done);
  }
}
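// Splitting the 64-bit hole pattern into two 32-bit stores matches the
// little-endian layout of a double in the backing store (a sketch):
//   uint64_t bits = kHoleNanInt64;
//   int32_t lower = static_cast<int32_t>(bits);                 // word 0
//   int32_t upper = static_cast<int32_t>(bits >> kBitsPerInt);  // word 1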


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

  Operand operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_ELEMENTS,
      instr->base_offset());
  if (instr->value()->IsRegister()) {
    __ mov(operand, ToRegister(instr->value()));
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsSmi(operand_value)) {
      Immediate immediate = ToImmediate(operand_value, Representation::Smi());
      __ mov(operand, immediate);
    } else {
      DCHECK(!IsInteger32(operand_value));
      Handle<Object> handle_value = ToHandle(operand_value);
      __ mov(operand, handle_value);
    }
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    DCHECK(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    DCHECK(!instr->key()->IsConstantOperand());
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ lea(key, operand);
    __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by elements kind: external/typed array, fast double, or fast.
  if (instr->is_fixed_typed_array()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode())
                        .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}


void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen,
                              LMaybeGrowElements* instr,
                              const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = eax;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr, x87_stack_);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ jmp(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ cmp(ToOperand(current_capacity), Immediate(constant_key));
    __ j(less_equal, deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ cmp(ToRegister(key), Immediate(constant_capacity));
    __ j(greater_equal, deferred->entry());
  } else {
    __ cmp(ToRegister(key), ToRegister(current_capacity));
    __ j(greater_equal, deferred->entry());
  }

  __ mov(result, ToOperand(instr->elements()));
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = eax;
  __ Move(result, Immediate(0));

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ Move(result, ToRegister(instr->object()));
    } else {
      __ mov(result, ToOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      LConstantOperand* constant_key = LConstantOperand::cast(key);
      int32_t int_key = ToInteger32(constant_key);
      if (Smi::IsValid(int_key)) {
        __ mov(ebx, Immediate(Smi::FromInt(int_key)));
      } else {
        // We should never get here at runtime because there is a smi check on
        // the key before this point.
        __ int3();
      }
    } else {
      __ Move(ebx, ToRegister(key));
      __ SmiTag(ebx);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ test(result, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  bool is_simple_map_transition =
      IsSimpleMapChangeTransition(from_kind, to_kind);
  Label::Distance branch_distance =
      is_simple_map_transition ? Label::kNear : Label::kFar;
  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable, branch_distance);
  if (is_simple_map_transition) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
           Immediate(to_map));
    // Write barrier.
    DCHECK_NOT_NULL(instr->temp());
    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
                         ToRegister(instr->temp()), kDontSaveFPRegs);
  } else {
    DCHECK(ToRegister(instr->context()).is(esi));
    DCHECK(object_reg.is(eax));
    PushSafepointRegistersScope scope(this);
    __ mov(ebx, to_map);
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(instr,
        RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen,
                             LStringCharCodeAt* instr,
                             const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);

  StringCharLoadGenerator::Generate(masm(),
                                    factory(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
                                      Representation::Smi());
    __ push(immediate);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
                          instr, instr->context());
  __ AssertSmi(eax);
  __ SmiUntag(eax);
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen,
                               LStringCharFromCode* instr,
                               const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  __ cmp(char_code, String::kMaxOneByteCharCode);
  __ j(above, deferred->entry());
  __ Move(result, Immediate(factory()->single_character_string_cache()));
  __ mov(result, FieldOperand(result,
                              char_code, times_pointer_size,
                              FixedArray::kHeaderSize));
  __ cmp(result, factory()->undefined_value());
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  DCHECK(output->IsDoubleRegister());
  if (input->IsRegister()) {
    Register input_reg = ToRegister(input);
    __ push(input_reg);
    X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
    __ pop(input_reg);
  } else {
    X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  X87Register res = ToX87Register(output);
  X87PrepareToWrite(res);
  __ LoadUint32NoSSE2(ToRegister(input));
  X87CommitWrite(res);
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen,
                       LNumberTagI* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
                                       SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred =
      new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen,
                       LNumberTagU* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
                                       UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred =
      new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
  __ cmp(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ SmiTag(reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register reg = ToRegister(value);
  Register tmp = ToRegister(temp);

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    __ SmiUntag(reg);
    __ xor_(reg, 0x80000000);
    __ push(reg);
    __ fild_s(Operand(esp, 0));
    __ pop(reg);
  } else {
    // There's no fild variant for unsigned values, so zero-extend to a 64-bit
    // int manually.
    __ push(Immediate(0));
    __ push(reg);
    __ fild_d(Operand(esp, 0));
    __ pop(reg);
    __ pop(reg);
  }
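  // Why the xor above recovers the value: SmiTag shifted the integer left
  // by one, and SmiUntag shifted it back arithmetically, duplicating bit 30
  // into bit 31. Overflow means bits 30 and 31 of the original disagreed,
  // so flipping bit 31 restores it; e.g. 0x40000000 tags to 0x80000000,
  // untags to 0xC0000000, and the xor yields 0x40000000 again.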
4600
4601  if (FLAG_inline_new) {
4602    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
4603    __ jmp(&done, Label::kNear);
4604  }
4605
4606  // Slow case: Call the runtime system to do the number allocation.
4607  __ bind(&slow);
4608  {
4609    // TODO(3095996): Put a valid pointer value in the stack slot where the
4610    // result register is stored, as this register is in the pointer map, but
4611    // contains an integer value.
4612    __ Move(reg, Immediate(0));
4613
4614    // Preserve the value of all registers.
4615    PushSafepointRegistersScope scope(this);
4616
4617    // NumberTagI and NumberTagD use the context from the frame, rather than
4618    // the environment's HContext or HInlinedContext value.
4619    // They only call Runtime::kAllocateHeapNumber.
4620    // The corresponding HChange instructions are added in a phase that does
4621    // not have easy access to the local context.
4622    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4623    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4624    RecordSafepointWithRegisters(
4625        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4626    __ StoreToSafepointRegisterSlot(reg, eax);
4627  }
4628
4629  __ bind(&done);
  __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen,
                       LNumberTagD* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  Register reg = ToRegister(instr->result());

  // Put the value on top of the x87 stack.
  X87Register src = ToX87Register(instr->value());
  // Don't use X87LoadForUsage here; it is only for instructions that clobber
  // FP registers.
  x87_stack_.Fxch(src);

  DeferredNumberTagD* deferred =
      new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
  if (FLAG_inline_new) {
    Register tmp = ToRegister(instr->temp());
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(reg, eax);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
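    // On ia32, smis hold 31-bit signed values, so a uint32 fits only if
    // bits 30 and 31 are both clear.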
    __ test(input, Immediate(0xc0000000));
    DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
  }
  __ SmiTag(input);
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->value();
  Register result = ToRegister(input);
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    __ test(result, Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
  } else {
    __ AssertSmi(result);
  }
  __ SmiUntag(result);
}


void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
                                      Register temp_reg, X87Register res_reg,
                                      NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Label load_smi, done;

  X87PrepareToWrite(res_reg);
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi);

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    if (!can_convert_undefined_to_nan) {
      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    } else {
      Label heap_number, convert;
      __ j(equal, &heap_number);

      // Convert undefined (or hole) to NaN.
      __ cmp(input_reg, factory()->undefined_value());
      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);

      __ bind(&convert);
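      // The two pushes form the 64-bit pattern 0xFFFFFFFF7FFFFFFF on the
      // stack, a quiet NaN, which is then loaded onto the x87 stack.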
      __ push(Immediate(0xffffffff));
      __ push(Immediate(0x7fffffff));
      __ fld_d(MemOperand(esp, 0));
      __ lea(esp, Operand(esp, kDoubleSize));
      __ jmp(&done, Label::kNear);

      __ bind(&heap_number);
    }
    // Heap number to x87 conversion.
    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ fldz();
      __ FCmp();
      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ j(not_zero, &done, Label::kNear);

      // Use general purpose registers to check if we have -0.0
      __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
      __ test(temp_reg, Immediate(HeapNumber::kSignMask));
      __ j(zero, &done, Label::kNear);

      // Pop FPU stack before deoptimizing.
      __ fstp(0);
      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
    }
    __ jmp(&done, Label::kNear);
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  __ bind(&load_smi);
  // Clobbering a temp is faster than re-tagging the
  // input register since we avoid dependencies.
  __ mov(temp_reg, input_reg);
  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
  __ push(temp_reg);
  __ fild_s(Operand(esp, 0));
  __ add(esp, Immediate(kPointerSize));
  __ bind(&done);
  X87CommitWrite(res_reg);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  // The input was optimistically untagged; revert it.
  STATIC_ASSERT(kSmiTagSize == 1);
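  // The optimistic untag shifted the pointer right by one; scaling by two
  // and re-adding the heap-object tag restores the original pointer.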
  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ cmp(input_reg, factory()->undefined_value());
    __ j(not_equal, &check_bools, Label::kNear);
    __ Move(input_reg, Immediate(0));
    __ jmp(done);

    __ bind(&check_bools);
    __ cmp(input_reg, factory()->true_value());
    __ j(not_equal, &check_false, Label::kNear);
    __ Move(input_reg, Immediate(1));
    __ jmp(done);

    __ bind(&check_false);
    __ cmp(input_reg, factory()->false_value());
    DeoptimizeIf(not_equal, instr,
                 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ Move(input_reg, Immediate(0));
  } else {
    // TODO(olivf) Converting a number on the fpu is actually quite slow. We
    // should first try a fast conversion and then bail out to this slow case.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           isolate()->factory()->heap_number_map());
    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);

    __ sub(esp, Immediate(kPointerSize));
    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
      Label no_precision_lost, not_nan, zero_check;
      __ fld(0);

      __ fist_s(MemOperand(esp, 0));
      __ fild_s(MemOperand(esp, 0));
      __ FCmp();
      __ pop(input_reg);

      __ j(equal, &no_precision_lost, Label::kNear);
      __ fstp(0);
      DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
      __ bind(&no_precision_lost);

      __ j(parity_odd, &not_nan);
      __ fstp(0);
      DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
      __ bind(&not_nan);

      __ test(input_reg, Operand(input_reg));
      __ j(zero, &zero_check, Label::kNear);
      __ fstp(0);
      __ jmp(done);

      __ bind(&zero_check);
      // To check for minus zero, we load the value again as float, and check
      // if that is still 0.
      __ sub(esp, Immediate(kPointerSize));
      __ fstp_s(Operand(esp, 0));
      __ pop(input_reg);
      __ test(input_reg, Operand(input_reg));
      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
    } else {
      __ fist_s(MemOperand(esp, 0));
      __ fild_s(MemOperand(esp, 0));
      __ FCmp();
      __ pop(input_reg);
      DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
      DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
    }
  }
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen,
                      LTaggedToI* instr,
                      const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register input_reg = ToRegister(input);
  DCHECK(input_reg.is(ToRegister(instr->result())));

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred =
        new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
    // Optimistically untag the input.
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
    __ SmiUntag(input_reg);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ j(carry, deferred->entry());
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* temp = instr->temp();
  DCHECK(temp->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  Register temp_reg = ToRegister(temp);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result),
                         mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    X87Register input_reg = ToX87Register(input);
    X87Fxch(input_reg);
    __ TruncateX87TOSToI(result_reg);
  } else {
    Label lost_precision, is_nan, minus_zero, done;
    X87Register input_reg = ToX87Register(input);
    X87Fxch(input_reg);
    __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
                 &lost_precision, &is_nan, &minus_zero);
    __ jmp(&done);
    __ bind(&lost_precision);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
    __ bind(&is_nan);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
    __ bind(&minus_zero);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
    __ bind(&done);
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());
  Register result_reg = ToRegister(result);

  Label lost_precision, is_nan, minus_zero, done;
  X87Register input_reg = ToX87Register(input);
  X87Fxch(input_reg);
  __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
               &lost_precision, &is_nan, &minus_zero);
  __ jmp(&done);
  __ bind(&lost_precision);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
  __ bind(&is_nan);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
  __ bind(&minus_zero);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
  __ bind(&done);
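  // Tagging shifts the value left by one; signed overflow means the int32
  // does not fit in a 31-bit smi.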
  __ SmiTag(result_reg);
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ test(ToOperand(input), Immediate(kSmiTagMask));
    DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
  }
}


void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = ToRegister(instr->scratch());

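  // Deopt with "out of bounds" if the view's backing JSArrayBuffer has been
  // neutered (detached).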
  __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
  __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
            Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
        DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

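    // With a power-of-two mask the tag is either zero or equal to the mask,
    // so a single test_b suffices; otherwise mask and compare explicitly.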
    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
                   Deoptimizer::kWrongInstanceType);
    } else {
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmp(temp, tag);
      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  if (instr->hydrogen()->object_in_new_space()) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ cmp(reg, Operand::ForCell(cell));
  } else {
    Operand operand = ToOperand(instr->value());
    __ cmp(operand, object);
  }
  DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ xor_(esi, esi);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ test(eax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen,
                      LCheckMaps* instr,
                      Register object,
                      const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  X87Register value_reg = ToX87Register(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  X87Fxch(value_reg);
  __ ClampTOSToUint8(result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  DCHECK(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  Register scratch = ToRegister(instr->scratch());
  Register scratch2 = ToRegister(instr->scratch2());
  Register scratch3 = ToRegister(instr->scratch3());
  Label is_smi, done, heap_number, valid_exponent,
      largest_value, zero_result, maybe_nan_or_infinity;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
  __ jmp(&zero_result, Label::kNear);

  // Heap number
  __ bind(&heap_number);

  // Surprisingly, all of the hand-crafted bit manipulation below is much
  // faster than the x86 FPU's built-in instruction, especially since
  // "banker's rounding" would otherwise be very expensive.

  // Get exponent word.
  __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));

  // Test for negative values --> clamp to zero
  __ test(scratch, scratch);
  __ j(negative, &zero_result, Label::kNear);

  // Get exponent alone in scratch2.
  __ mov(scratch2, scratch);
  __ and_(scratch2, HeapNumber::kExponentMask);
  __ shr(scratch2, HeapNumber::kExponentShift);
  __ j(zero, &zero_result, Label::kNear);
  __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
  __ j(negative, &zero_result, Label::kNear);

  const uint32_t non_int8_exponent = 7;
  __ cmp(scratch2, Immediate(non_int8_exponent + 1));
  // If the exponent is too big, check for special values.
  __ j(greater, &maybe_nan_or_infinity, Label::kNear);

  __ bind(&valid_exponent);
  // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
  // < 7. The shift bias is the number of bits to shift the mantissa such that,
  // with an exponent of 7, the top-most one ends up in bit 30, allowing
  // detection of rounding overflow from 255.5 to 256 (bit 31 goes from 0 to
  // 1).
  int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
  __ lea(result_reg, MemOperand(scratch2, shift_bias));
  // Here result_reg (ecx) is the shift, scratch is the exponent word.  Get the
  // top bits of the mantissa.
  __ and_(scratch, HeapNumber::kMantissaMask);
  // Put back the implicit 1 of the mantissa
  __ or_(scratch, 1 << HeapNumber::kExponentShift);
  // Shift up to round
  __ shl_cl(scratch);
  // Use "banker's rounding" as per spec: if the fractional part of the number
  // is exactly 0.5, add the bit in the "ones" place to the "halves" place,
  // which has the effect of rounding to even.
  __ mov(scratch2, scratch);
  const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
  const uint32_t one_bit_shift = one_half_bit_shift + 1;
  __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
  __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
  Label no_round;
  __ j(less, &no_round, Label::kNear);
  Label round_up;
  __ mov(scratch2, Immediate(1 << one_half_bit_shift));
  __ j(greater, &round_up, Label::kNear);
  __ test(scratch3, scratch3);
  __ j(not_zero, &round_up, Label::kNear);
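  // Exactly 0.5 and no lower mantissa bits set: shift the "ones" bit into the
  // "halves" place so that adding it rounds ties to even.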
  __ mov(scratch2, scratch);
  __ and_(scratch2, Immediate(1 << one_bit_shift));
  __ shr(scratch2, 1);
  __ bind(&round_up);
  __ add(scratch, scratch2);
  __ j(overflow, &largest_value, Label::kNear);
  __ bind(&no_round);
  __ shr(scratch, 23);
  __ mov(result_reg, scratch);
  __ jmp(&done, Label::kNear);

  __ bind(&maybe_nan_or_infinity);
  // Check for NaN/Infinity, all other values map to 255
  __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
  __ j(not_equal, &largest_value, Label::kNear);

  // Check for NaN, which differs from Infinity in that at least one mantissa
  // bit is set.
  __ and_(scratch, HeapNumber::kMantissaMask);
  __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ j(not_zero, &zero_result, Label::kNear);  // M!=0 --> NaN
  // Infinity -> Fall through to map to 255.

  __ bind(&largest_value);
  __ mov(result_reg, Immediate(255));
  __ jmp(&done, Label::kNear);

  __ bind(&zero_result);
  __ xor_(result_reg, result_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  if (!input_reg.is(result_reg)) {
    __ mov(result_reg, input_reg);
  }
  __ SmiUntag(result_reg);
  __ ClampUint8(result_reg);
  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  X87Register value_reg = ToX87Register(instr->value());
  Register result_reg = ToRegister(instr->result());
  X87Fxch(value_reg);
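  // Spill the double to the stack and read back the requested 32-bit half;
  // on little-endian ia32 the high word sits at esp + kPointerSize.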
  __ sub(esp, Immediate(kDoubleSize));
  __ fst_d(Operand(esp, 0));
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ mov(result_reg, Operand(esp, kPointerSize));
  } else {
    __ mov(result_reg, Operand(esp, 0));
  }
  __ add(esp, Immediate(kDoubleSize));
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen,
                     LAllocate* instr,
                     const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr, x87_stack_);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }
  DCHECK(!instr->hydrogen()->IsAllocationFolded());

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(temp, (size / kPointerSize) - 1);
    } else {
      temp = ToRegister(instr->size());
      __ shr(temp, kPointerSizeLog2);
      __ dec(temp);
    }
    Label loop;
    __ bind(&loop);
    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
        isolate()->factory()->one_pointer_filler_map());
    __ dec(temp);
    __ j(not_zero, &loop);
  }
}

void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, temp, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, temp, flags);
  }
}

void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(ToRegister(instr->size()));
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ push(Immediate(Smi::FromInt(size)));
    } else {
      // We should never get here at runtime => abort
      __ int3();
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ push(Immediate(Smi::FromInt(flags)));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
    if (instr->hydrogen()->IsOldSpaceAllocation()) {
      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
      allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
    }
    // If the allocation-folding dominator's allocation triggered a GC, the
    // allocation happened in the runtime. We have to reset the top pointer
    // to virtually undo the allocation.
    ExternalReference allocation_top =
        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
    __ sub(eax, Immediate(kHeapObjectTag));
    __ mov(Operand::StaticVariable(allocation_top), eax);
    __ add(eax, Immediate(kHeapObjectTag));
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->value()).is(ebx));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
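  // Smis always have typeof "number"; answer directly and skip the stub call.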
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(eax, Immediate(isolate()->factory()->number_string()));
  __ jmp(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
  if (String::Equals(type_name, factory()->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    final_branch_condition = below;

  } else if (String::Equals(type_name, factory()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->boolean_string())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label, true_distance);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->undefined_string())) {
    __ cmp(input, factory()->null_value());
    __ j(equal, false_label, false_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory()->function_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for callable and not undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
    __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
    __ cmp(input, 1 << Map::kIsCallable);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ cmp(input, factory()->null_value());
    __ j(equal, true_label, true_distance);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
    __ j(below, false_label, false_distance);
    // Check for callable or undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    final_branch_condition = zero;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)         \
  } else if (String::Equals(type_name, factory()->type##_string())) { \
    __ JumpIfSmi(input, false_label, false_distance);                 \
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),               \
           factory()->type##_map());                                  \
    final_branch_condition = equal;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
    // clang-format on

  } else {
    __ jmp(false_label, false_distance);
  }
  return final_branch_condition;
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen,
                       LStackCheck* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(esi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr, x87_stack_);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Immediate(Smi::FromInt(0)));
  __ j(not_equal, &load_cache, Label::kNear);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  __ test(result, result);
  DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ push(object);
  __ push(index);
  __ xor_(esi, esi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, eax);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index,
                              const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, object, index, x87_stack_);

  Label out_of_object, done;
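  // Bit 0 of the smi index flags a mutable heap-number field, which must be
  // boxed in deferred code; after untagging, a negative index selects the
  // out-of-object property backing store.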
  __ test(index, Immediate(Smi::FromInt(1)));
  __ j(not_zero, deferred->entry());

  __ sar(index, 1);

  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to the out-of-object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X87