1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if V8_TARGET_ARCH_IA32
31
32#include "ia32/lithium-codegen-ia32.h"
33#include "ic.h"
34#include "code-stubs.h"
35#include "deoptimizer.h"
36#include "stub-cache.h"
37#include "codegen.h"
38#include "hydrogen-osr.h"
39
40namespace v8 {
41namespace internal {
42
43
44static SaveFPRegsMode GetSaveFPRegsMode() {
45  // We don't need to save floating point regs when generating the snapshot
46  return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs;
47}
48
49
50// When invoking builtins, we need to record the safepoint in the middle of
51// the invoke instruction sequence generated by the macro assembler.
52class SafepointGenerator V8_FINAL : public CallWrapper {
53 public:
54  SafepointGenerator(LCodeGen* codegen,
55                     LPointerMap* pointers,
56                     Safepoint::DeoptMode mode)
57      : codegen_(codegen),
58        pointers_(pointers),
59        deopt_mode_(mode) {}
60  virtual ~SafepointGenerator() {}
61
62  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
63
64  virtual void AfterCall() const V8_OVERRIDE {
65    codegen_->RecordSafepoint(pointers_, deopt_mode_);
66  }
67
68 private:
69  LCodeGen* codegen_;
70  LPointerMap* pointers_;
71  Safepoint::DeoptMode deopt_mode_;
72};
73
74
75#define __ masm()->
76
77bool LCodeGen::GenerateCode() {
78  LPhase phase("Z_Code generation", chunk());
79  ASSERT(is_unused());
80  status_ = GENERATING;
81
82  // Open a frame scope to indicate that there is a frame on the stack.  The
83  // MANUAL indicates that the scope shouldn't actually generate code to set up
84  // the frame (that is done in GeneratePrologue).
85  FrameScope frame_scope(masm_, StackFrame::MANUAL);
86
87  support_aligned_spilled_doubles_ = info()->IsOptimizing();
88
89  dynamic_frame_alignment_ = info()->IsOptimizing() &&
90      ((chunk()->num_double_slots() > 2 &&
91        !chunk()->graph()->is_recursive()) ||
92       !info()->osr_ast_id().IsNone());
93
94  return GeneratePrologue() &&
95      GenerateBody() &&
96      GenerateDeferredCode() &&
97      GenerateJumpTable() &&
98      GenerateSafepointTable();
99}
100
101
102void LCodeGen::FinishCode(Handle<Code> code) {
103  ASSERT(is_done());
104  code->set_stack_slots(GetStackSlotCount());
105  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
106  RegisterDependentCodeForEmbeddedMaps(code);
107  PopulateDeoptimizationData(code);
108  if (!info()->IsStub()) {
109    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
110  }
111  info()->CommitDependencies(code);
112}
113
114
115void LCodeGen::Abort(BailoutReason reason) {
116  info()->set_bailout_reason(reason);
117  status_ = ABORTED;
118}
119
120
121#ifdef _MSC_VER
122void LCodeGen::MakeSureStackPagesMapped(int offset) {
123  const int kPageSize = 4 * KB;
124  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
125    __ mov(Operand(esp, offset), eax);
126  }
127}
128#endif
129
130
131void LCodeGen::SaveCallerDoubles() {
132  ASSERT(info()->saves_caller_doubles());
133  ASSERT(NeedsEagerFrame());
134  Comment(";;; Save clobbered callee double registers");
135  CpuFeatureScope scope(masm(), SSE2);
136  int count = 0;
137  BitVector* doubles = chunk()->allocated_double_registers();
138  BitVector::Iterator save_iterator(doubles);
139  while (!save_iterator.Done()) {
140    __ movsd(MemOperand(esp, count * kDoubleSize),
141              XMMRegister::FromAllocationIndex(save_iterator.Current()));
142    save_iterator.Advance();
143    count++;
144  }
145}
146
147
148void LCodeGen::RestoreCallerDoubles() {
149  ASSERT(info()->saves_caller_doubles());
150  ASSERT(NeedsEagerFrame());
151  Comment(";;; Restore clobbered callee double registers");
152  CpuFeatureScope scope(masm(), SSE2);
153  BitVector* doubles = chunk()->allocated_double_registers();
154  BitVector::Iterator save_iterator(doubles);
155  int count = 0;
156  while (!save_iterator.Done()) {
157    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
158              MemOperand(esp, count * kDoubleSize));
159    save_iterator.Advance();
160    count++;
161  }
162}
163
164
165bool LCodeGen::GeneratePrologue() {
166  ASSERT(is_generating());
167
168  if (info()->IsOptimizing()) {
169    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
170
171#ifdef DEBUG
172    if (strlen(FLAG_stop_at) > 0 &&
173        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
174      __ int3();
175    }
176#endif
177
178    // Strict mode functions and builtins need to replace the receiver
179    // with undefined when called as functions (without an explicit
180    // receiver object). ecx is zero for method calls and non-zero for
181    // function calls.
182    if (!info_->is_classic_mode() || info_->is_native()) {
183      Label ok;
184      __ test(ecx, Operand(ecx));
185      __ j(zero, &ok, Label::kNear);
186      // +1 for return address.
187      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
188      __ mov(Operand(esp, receiver_offset),
189             Immediate(isolate()->factory()->undefined_value()));
190      __ bind(&ok);
191    }
192
193    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
194      // Move state of dynamic frame alignment into edx.
195      __ Set(edx, Immediate(kNoAlignmentPadding));
196
197      Label do_not_pad, align_loop;
198      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
199      // Align esp + 4 to a multiple of 2 * kPointerSize.
200      __ test(esp, Immediate(kPointerSize));
201      __ j(not_zero, &do_not_pad, Label::kNear);
202      __ push(Immediate(0));
203      __ mov(ebx, esp);
204      __ mov(edx, Immediate(kAlignmentPaddingPushed));
205      // Copy arguments, receiver, and return address.
206      __ mov(ecx, Immediate(scope()->num_parameters() + 2));
207
208      __ bind(&align_loop);
209      __ mov(eax, Operand(ebx, 1 * kPointerSize));
210      __ mov(Operand(ebx, 0), eax);
211      __ add(Operand(ebx), Immediate(kPointerSize));
212      __ dec(ecx);
213      __ j(not_zero, &align_loop, Label::kNear);
214      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
215      __ bind(&do_not_pad);
216    }
217  }
218
219  info()->set_prologue_offset(masm_->pc_offset());
220  if (NeedsEagerFrame()) {
221    ASSERT(!frame_is_built_);
222    frame_is_built_ = true;
223    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
224    info()->AddNoFrameRange(0, masm_->pc_offset());
225  }
226
227  if (info()->IsOptimizing() &&
228      dynamic_frame_alignment_ &&
229      FLAG_debug_code) {
230    __ test(esp, Immediate(kPointerSize));
231    __ Assert(zero, kFrameIsExpectedToBeAligned);
232  }
233
234  // Reserve space for the stack slots needed by the code.
235  int slots = GetStackSlotCount();
236  ASSERT(slots != 0 || !info()->IsOptimizing());
237  if (slots > 0) {
238    if (slots == 1) {
239      if (dynamic_frame_alignment_) {
240        __ push(edx);
241      } else {
242        __ push(Immediate(kNoAlignmentPadding));
243      }
244    } else {
245      if (FLAG_debug_code) {
246        __ sub(Operand(esp), Immediate(slots * kPointerSize));
247#ifdef _MSC_VER
248        MakeSureStackPagesMapped(slots * kPointerSize);
249#endif
250        __ push(eax);
251        __ mov(Operand(eax), Immediate(slots));
252        Label loop;
253        __ bind(&loop);
254        __ mov(MemOperand(esp, eax, times_4, 0),
255               Immediate(kSlotsZapValue));
256        __ dec(eax);
257        __ j(not_zero, &loop);
258        __ pop(eax);
259      } else {
260        __ sub(Operand(esp), Immediate(slots * kPointerSize));
261#ifdef _MSC_VER
262        MakeSureStackPagesMapped(slots * kPointerSize);
263#endif
264      }
265
266      if (support_aligned_spilled_doubles_) {
267        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
268        // Store dynamic frame alignment state in the first local.
269        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
270        if (dynamic_frame_alignment_) {
271          __ mov(Operand(ebp, offset), edx);
272        } else {
273          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
274        }
275      }
276    }
277
278    if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
279      SaveCallerDoubles();
280    }
281  }
282
283  // Possibly allocate a local context.
284  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
285  if (heap_slots > 0) {
286    Comment(";;; Allocate local context");
287    // Argument to NewContext is the function, which is still in edi.
288    __ push(edi);
289    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
290      FastNewContextStub stub(heap_slots);
291      __ CallStub(&stub);
292    } else {
293      __ CallRuntime(Runtime::kNewFunctionContext, 1);
294    }
295    RecordSafepoint(Safepoint::kNoLazyDeopt);
296    // Context is returned in both eax and esi.  It replaces the context
297    // passed to us.  It's saved in the stack and kept live in esi.
298    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
299
300    // Copy parameters into context if necessary.
301    int num_parameters = scope()->num_parameters();
302    for (int i = 0; i < num_parameters; i++) {
303      Variable* var = scope()->parameter(i);
304      if (var->IsContextSlot()) {
305        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
306            (num_parameters - 1 - i) * kPointerSize;
307        // Load parameter from stack.
308        __ mov(eax, Operand(ebp, parameter_offset));
309        // Store it in the context.
310        int context_offset = Context::SlotOffset(var->index());
311        __ mov(Operand(esi, context_offset), eax);
312        // Update the write barrier. This clobbers eax and ebx.
313        __ RecordWriteContextSlot(esi,
314                                  context_offset,
315                                  eax,
316                                  ebx,
317                                  kDontSaveFPRegs);
318      }
319    }
320    Comment(";;; End allocate local context");
321  }
322
323  // Trace the call.
324  if (FLAG_trace && info()->IsOptimizing()) {
325    // We have not executed any compiled code yet, so esi still holds the
326    // incoming context.
327    __ CallRuntime(Runtime::kTraceEnter, 0);
328  }
329  return !is_aborted();
330}
331
332
333void LCodeGen::GenerateOsrPrologue() {
334  // Generate the OSR entry prologue at the first unknown OSR value, or if there
335  // are none, at the OSR entrypoint instruction.
336  if (osr_pc_offset_ >= 0) return;
337
338  osr_pc_offset_ = masm()->pc_offset();
339
340    // Move state of dynamic frame alignment into edx.
341  __ Set(edx, Immediate(kNoAlignmentPadding));
342
343  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
344    Label do_not_pad, align_loop;
345    // Align ebp + 4 to a multiple of 2 * kPointerSize.
346    __ test(ebp, Immediate(kPointerSize));
347    __ j(zero, &do_not_pad, Label::kNear);
348    __ push(Immediate(0));
349    __ mov(ebx, esp);
350    __ mov(edx, Immediate(kAlignmentPaddingPushed));
351
352    // Move all parts of the frame over one word. The frame consists of:
353    // unoptimized frame slots, alignment state, context, frame pointer, return
354    // address, receiver, and the arguments.
355    __ mov(ecx, Immediate(scope()->num_parameters() +
356           5 + graph()->osr()->UnoptimizedFrameSlots()));
357
358    __ bind(&align_loop);
359    __ mov(eax, Operand(ebx, 1 * kPointerSize));
360    __ mov(Operand(ebx, 0), eax);
361    __ add(Operand(ebx), Immediate(kPointerSize));
362    __ dec(ecx);
363    __ j(not_zero, &align_loop, Label::kNear);
364    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
365    __ sub(Operand(ebp), Immediate(kPointerSize));
366    __ bind(&do_not_pad);
367  }
368
369  // Save the first local, which is overwritten by the alignment state.
370  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
371  __ push(alignment_loc);
372
373  // Set the dynamic frame alignment state.
374  __ mov(alignment_loc, edx);
375
376  // Adjust the frame size, subsuming the unoptimized frame into the
377  // optimized frame.
378  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
379  ASSERT(slots >= 1);
380  __ sub(esp, Immediate((slots - 1) * kPointerSize));
381}
382
383
384void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
385  if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
386}
387
388
389void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
390  if (!CpuFeatures::IsSupported(SSE2)) {
391    if (instr->IsGoto()) {
392      x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
393    } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
394               !instr->IsGap() && !instr->IsReturn()) {
395      if (instr->ClobbersDoubleRegisters()) {
396        if (instr->HasDoubleRegisterResult()) {
397          ASSERT_EQ(1, x87_stack_.depth());
398        } else {
399          ASSERT_EQ(0, x87_stack_.depth());
400        }
401      }
402      __ VerifyX87StackDepth(x87_stack_.depth());
403    }
404  }
405}
406
407
408bool LCodeGen::GenerateJumpTable() {
409  Label needs_frame;
410  if (jump_table_.length() > 0) {
411    Comment(";;; -------------------- Jump table --------------------");
412  }
413  for (int i = 0; i < jump_table_.length(); i++) {
414    __ bind(&jump_table_[i].label);
415    Address entry = jump_table_[i].address;
416    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
417    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
418    if (id == Deoptimizer::kNotDeoptimizationEntry) {
419      Comment(";;; jump table entry %d.", i);
420    } else {
421      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
422    }
423    if (jump_table_[i].needs_frame) {
424      ASSERT(!info()->saves_caller_doubles());
425      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
426      if (needs_frame.is_bound()) {
427        __ jmp(&needs_frame);
428      } else {
429        __ bind(&needs_frame);
430        __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
431        // This variant of deopt can only be used with stubs. Since we don't
432        // have a function pointer to install in the stack frame that we're
433        // building, install a special marker there instead.
434        ASSERT(info()->IsStub());
435        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
436        // Push a PC inside the function so that the deopt code can find where
437        // the deopt comes from. It doesn't have to be the precise return
438        // address of a "calling" LAZY deopt, it only has to be somewhere
439        // inside the code body.
440        Label push_approx_pc;
441        __ call(&push_approx_pc);
442        __ bind(&push_approx_pc);
443        // Push the continuation which was stashed were the ebp should
444        // be. Replace it with the saved ebp.
445        __ push(MemOperand(esp, 3 * kPointerSize));
446        __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
447        __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
448        __ ret(0);  // Call the continuation without clobbering registers.
449      }
450    } else {
451      if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
452        RestoreCallerDoubles();
453      }
454      __ call(entry, RelocInfo::RUNTIME_ENTRY);
455    }
456  }
457  return !is_aborted();
458}
459
460
461bool LCodeGen::GenerateDeferredCode() {
462  ASSERT(is_generating());
463  if (deferred_.length() > 0) {
464    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
465      LDeferredCode* code = deferred_[i];
466      X87Stack copy(code->x87_stack());
467      x87_stack_ = copy;
468
469      HValue* value =
470          instructions_->at(code->instruction_index())->hydrogen_value();
471      RecordAndWritePosition(value->position());
472
473      Comment(";;; <@%d,#%d> "
474              "-------------------- Deferred %s --------------------",
475              code->instruction_index(),
476              code->instr()->hydrogen_value()->id(),
477              code->instr()->Mnemonic());
478      __ bind(code->entry());
479      if (NeedsDeferredFrame()) {
480        Comment(";;; Build frame");
481        ASSERT(!frame_is_built_);
482        ASSERT(info()->IsStub());
483        frame_is_built_ = true;
484        // Build the frame in such a way that esi isn't trashed.
485        __ push(ebp);  // Caller's frame pointer.
486        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
487        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
488        __ lea(ebp, Operand(esp, 2 * kPointerSize));
489        Comment(";;; Deferred code");
490      }
491      code->Generate();
492      if (NeedsDeferredFrame()) {
493        __ bind(code->done());
494        Comment(";;; Destroy frame");
495        ASSERT(frame_is_built_);
496        frame_is_built_ = false;
497        __ mov(esp, ebp);
498        __ pop(ebp);
499      }
500      __ jmp(code->exit());
501    }
502  }
503
504  // Deferred code is the last part of the instruction sequence. Mark
505  // the generated code as done unless we bailed out.
506  if (!is_aborted()) status_ = DONE;
507  return !is_aborted();
508}
509
510
511bool LCodeGen::GenerateSafepointTable() {
512  ASSERT(is_done());
513  if (!info()->IsStub()) {
514    // For lazy deoptimization we need space to patch a call after every call.
515    // Ensure there is always space for such patching, even if the code ends
516    // in a call.
517    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
518    while (masm()->pc_offset() < target_offset) {
519      masm()->nop();
520    }
521  }
522  safepoints_.Emit(masm(), GetStackSlotCount());
523  return !is_aborted();
524}
525
526
527Register LCodeGen::ToRegister(int index) const {
528  return Register::FromAllocationIndex(index);
529}
530
531
532X87Register LCodeGen::ToX87Register(int index) const {
533  return X87Register::FromAllocationIndex(index);
534}
535
536
537XMMRegister LCodeGen::ToDoubleRegister(int index) const {
538  return XMMRegister::FromAllocationIndex(index);
539}
540
541
542void LCodeGen::X87LoadForUsage(X87Register reg) {
543  ASSERT(x87_stack_.Contains(reg));
544  x87_stack_.Fxch(reg);
545  x87_stack_.pop();
546}
547
548
549void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
550  ASSERT(x87_stack_.Contains(reg1));
551  ASSERT(x87_stack_.Contains(reg2));
552  x87_stack_.Fxch(reg1, 1);
553  x87_stack_.Fxch(reg2);
554  x87_stack_.pop();
555  x87_stack_.pop();
556}
557
558
559void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
560  ASSERT(is_mutable_);
561  ASSERT(Contains(reg) && stack_depth_ > other_slot);
562  int i  = ArrayIndex(reg);
563  int st = st2idx(i);
564  if (st != other_slot) {
565    int other_i = st2idx(other_slot);
566    X87Register other = stack_[other_i];
567    stack_[other_i]   = reg;
568    stack_[i]         = other;
569    if (st == 0) {
570      __ fxch(other_slot);
571    } else if (other_slot == 0) {
572      __ fxch(st);
573    } else {
574      __ fxch(st);
575      __ fxch(other_slot);
576      __ fxch(st);
577    }
578  }
579}
580
581
582int LCodeGen::X87Stack::st2idx(int pos) {
583  return stack_depth_ - pos - 1;
584}
585
586
587int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
588  for (int i = 0; i < stack_depth_; i++) {
589    if (stack_[i].is(reg)) return i;
590  }
591  UNREACHABLE();
592  return -1;
593}
594
595
596bool LCodeGen::X87Stack::Contains(X87Register reg) {
597  for (int i = 0; i < stack_depth_; i++) {
598    if (stack_[i].is(reg)) return true;
599  }
600  return false;
601}
602
603
604void LCodeGen::X87Stack::Free(X87Register reg) {
605  ASSERT(is_mutable_);
606  ASSERT(Contains(reg));
607  int i  = ArrayIndex(reg);
608  int st = st2idx(i);
609  if (st > 0) {
610    // keep track of how fstp(i) changes the order of elements
611    int tos_i = st2idx(0);
612    stack_[i] = stack_[tos_i];
613  }
614  pop();
615  __ fstp(st);
616}
617
618
619void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
620  if (x87_stack_.Contains(dst)) {
621    x87_stack_.Fxch(dst);
622    __ fstp(0);
623  } else {
624    x87_stack_.push(dst);
625  }
626  X87Fld(src, opts);
627}
628
629
630void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
631  ASSERT(!src.is_reg_only());
632  switch (opts) {
633    case kX87DoubleOperand:
634      __ fld_d(src);
635      break;
636    case kX87FloatOperand:
637      __ fld_s(src);
638      break;
639    case kX87IntOperand:
640      __ fild_s(src);
641      break;
642    default:
643      UNREACHABLE();
644  }
645}
646
647
648void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
649  ASSERT(!dst.is_reg_only());
650  x87_stack_.Fxch(src);
651  switch (opts) {
652    case kX87DoubleOperand:
653      __ fst_d(dst);
654      break;
655    case kX87IntOperand:
656      __ fist_s(dst);
657      break;
658    default:
659      UNREACHABLE();
660  }
661}
662
663
664void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
665  ASSERT(is_mutable_);
666  if (Contains(reg)) {
667    Free(reg);
668  }
669  // Mark this register as the next register to write to
670  stack_[stack_depth_] = reg;
671}
672
673
674void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
675  ASSERT(is_mutable_);
676  // Assert the reg is prepared to write, but not on the virtual stack yet
677  ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
678      stack_depth_ < X87Register::kNumAllocatableRegisters);
679  stack_depth_++;
680}
681
682
683void LCodeGen::X87PrepareBinaryOp(
684    X87Register left, X87Register right, X87Register result) {
685  // You need to use DefineSameAsFirst for x87 instructions
686  ASSERT(result.is(left));
687  x87_stack_.Fxch(right, 1);
688  x87_stack_.Fxch(left);
689}
690
691
692void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
693  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
694    bool double_inputs = instr->HasDoubleRegisterInput();
695
696    // Flush stack from tos down, since FreeX87() will mess with tos
697    for (int i = stack_depth_-1; i >= 0; i--) {
698      X87Register reg = stack_[i];
699      // Skip registers which contain the inputs for the next instruction
700      // when flushing the stack
701      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
702        continue;
703      }
704      Free(reg);
705      if (i < stack_depth_-1) i++;
706    }
707  }
708  if (instr->IsReturn()) {
709    while (stack_depth_ > 0) {
710      __ fstp(0);
711      stack_depth_--;
712    }
713    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
714  }
715}
716
717
718void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
719  ASSERT(stack_depth_ <= 1);
720  // If ever used for new stubs producing two pairs of doubles joined into two
721  // phis this assert hits. That situation is not handled, since the two stacks
722  // might have st0 and st1 swapped.
723  if (current_block_id + 1 != goto_instr->block_id()) {
724    // If we have a value on the x87 stack on leaving a block, it must be a
725    // phi input. If the next block we compile is not the join block, we have
726    // to discard the stack state.
727    stack_depth_ = 0;
728  }
729}
730
731
732void LCodeGen::EmitFlushX87ForDeopt() {
733  // The deoptimizer does not support X87 Registers. But as long as we
734  // deopt from a stub its not a problem, since we will re-materialize the
735  // original stub inputs, which can't be double registers.
736  ASSERT(info()->IsStub());
737  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
738    __ pushfd();
739    __ VerifyX87StackDepth(x87_stack_.depth());
740    __ popfd();
741  }
742  for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
743}
744
745
746Register LCodeGen::ToRegister(LOperand* op) const {
747  ASSERT(op->IsRegister());
748  return ToRegister(op->index());
749}
750
751
752X87Register LCodeGen::ToX87Register(LOperand* op) const {
753  ASSERT(op->IsDoubleRegister());
754  return ToX87Register(op->index());
755}
756
757
758XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
759  ASSERT(op->IsDoubleRegister());
760  return ToDoubleRegister(op->index());
761}
762
763
764int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
765  return ToRepresentation(op, Representation::Integer32());
766}
767
768
769int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
770                                   const Representation& r) const {
771  HConstant* constant = chunk_->LookupConstant(op);
772  int32_t value = constant->Integer32Value();
773  if (r.IsInteger32()) return value;
774  ASSERT(r.IsSmiOrTagged());
775  return reinterpret_cast<int32_t>(Smi::FromInt(value));
776}
777
778
779Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
780  HConstant* constant = chunk_->LookupConstant(op);
781  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
782  return constant->handle(isolate());
783}
784
785
786double LCodeGen::ToDouble(LConstantOperand* op) const {
787  HConstant* constant = chunk_->LookupConstant(op);
788  ASSERT(constant->HasDoubleValue());
789  return constant->DoubleValue();
790}
791
792
793ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
794  HConstant* constant = chunk_->LookupConstant(op);
795  ASSERT(constant->HasExternalReferenceValue());
796  return constant->ExternalReferenceValue();
797}
798
799
800bool LCodeGen::IsInteger32(LConstantOperand* op) const {
801  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
802}
803
804
805bool LCodeGen::IsSmi(LConstantOperand* op) const {
806  return chunk_->LookupLiteralRepresentation(op).IsSmi();
807}
808
809
810static int ArgumentsOffsetWithoutFrame(int index) {
811  ASSERT(index < 0);
812  return -(index + 1) * kPointerSize + kPCOnStackSize;
813}
814
815
816Operand LCodeGen::ToOperand(LOperand* op) const {
817  if (op->IsRegister()) return Operand(ToRegister(op));
818  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
819  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
820  if (NeedsEagerFrame()) {
821    return Operand(ebp, StackSlotOffset(op->index()));
822  } else {
823    // Retrieve parameter without eager stack-frame relative to the
824    // stack-pointer.
825    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
826  }
827}
828
829
830Operand LCodeGen::HighOperand(LOperand* op) {
831  ASSERT(op->IsDoubleStackSlot());
832  if (NeedsEagerFrame()) {
833    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
834  } else {
835    // Retrieve parameter without eager stack-frame relative to the
836    // stack-pointer.
837    return Operand(
838        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
839  }
840}
841
842
843void LCodeGen::WriteTranslation(LEnvironment* environment,
844                                Translation* translation) {
845  if (environment == NULL) return;
846
847  // The translation includes one command per value in the environment.
848  int translation_size = environment->translation_size();
849  // The output frame height does not include the parameters.
850  int height = translation_size - environment->parameter_count();
851
852  WriteTranslation(environment->outer(), translation);
853  bool has_closure_id = !info()->closure().is_null() &&
854      !info()->closure().is_identical_to(environment->closure());
855  int closure_id = has_closure_id
856      ? DefineDeoptimizationLiteral(environment->closure())
857      : Translation::kSelfLiteralId;
858  switch (environment->frame_type()) {
859    case JS_FUNCTION:
860      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
861      break;
862    case JS_CONSTRUCT:
863      translation->BeginConstructStubFrame(closure_id, translation_size);
864      break;
865    case JS_GETTER:
866      ASSERT(translation_size == 1);
867      ASSERT(height == 0);
868      translation->BeginGetterStubFrame(closure_id);
869      break;
870    case JS_SETTER:
871      ASSERT(translation_size == 2);
872      ASSERT(height == 0);
873      translation->BeginSetterStubFrame(closure_id);
874      break;
875    case ARGUMENTS_ADAPTOR:
876      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
877      break;
878    case STUB:
879      translation->BeginCompiledStubFrame();
880      break;
881    default:
882      UNREACHABLE();
883  }
884
885  int object_index = 0;
886  int dematerialized_index = 0;
887  for (int i = 0; i < translation_size; ++i) {
888    LOperand* value = environment->values()->at(i);
889    AddToTranslation(environment,
890                     translation,
891                     value,
892                     environment->HasTaggedValueAt(i),
893                     environment->HasUint32ValueAt(i),
894                     &object_index,
895                     &dematerialized_index);
896  }
897}
898
899
900void LCodeGen::AddToTranslation(LEnvironment* environment,
901                                Translation* translation,
902                                LOperand* op,
903                                bool is_tagged,
904                                bool is_uint32,
905                                int* object_index_pointer,
906                                int* dematerialized_index_pointer) {
907  if (op == LEnvironment::materialization_marker()) {
908    int object_index = (*object_index_pointer)++;
909    if (environment->ObjectIsDuplicateAt(object_index)) {
910      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
911      translation->DuplicateObject(dupe_of);
912      return;
913    }
914    int object_length = environment->ObjectLengthAt(object_index);
915    if (environment->ObjectIsArgumentsAt(object_index)) {
916      translation->BeginArgumentsObject(object_length);
917    } else {
918      translation->BeginCapturedObject(object_length);
919    }
920    int dematerialized_index = *dematerialized_index_pointer;
921    int env_offset = environment->translation_size() + dematerialized_index;
922    *dematerialized_index_pointer += object_length;
923    for (int i = 0; i < object_length; ++i) {
924      LOperand* value = environment->values()->at(env_offset + i);
925      AddToTranslation(environment,
926                       translation,
927                       value,
928                       environment->HasTaggedValueAt(env_offset + i),
929                       environment->HasUint32ValueAt(env_offset + i),
930                       object_index_pointer,
931                       dematerialized_index_pointer);
932    }
933    return;
934  }
935
936  if (op->IsStackSlot()) {
937    if (is_tagged) {
938      translation->StoreStackSlot(op->index());
939    } else if (is_uint32) {
940      translation->StoreUint32StackSlot(op->index());
941    } else {
942      translation->StoreInt32StackSlot(op->index());
943    }
944  } else if (op->IsDoubleStackSlot()) {
945    translation->StoreDoubleStackSlot(op->index());
946  } else if (op->IsArgument()) {
947    ASSERT(is_tagged);
948    int src_index = GetStackSlotCount() + op->index();
949    translation->StoreStackSlot(src_index);
950  } else if (op->IsRegister()) {
951    Register reg = ToRegister(op);
952    if (is_tagged) {
953      translation->StoreRegister(reg);
954    } else if (is_uint32) {
955      translation->StoreUint32Register(reg);
956    } else {
957      translation->StoreInt32Register(reg);
958    }
959  } else if (op->IsDoubleRegister()) {
960    XMMRegister reg = ToDoubleRegister(op);
961    translation->StoreDoubleRegister(reg);
962  } else if (op->IsConstantOperand()) {
963    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
964    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
965    translation->StoreLiteral(src_index);
966  } else {
967    UNREACHABLE();
968  }
969}
970
971
972void LCodeGen::CallCodeGeneric(Handle<Code> code,
973                               RelocInfo::Mode mode,
974                               LInstruction* instr,
975                               SafepointMode safepoint_mode) {
976  ASSERT(instr != NULL);
977  __ call(code, mode);
978  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
979
980  // Signal that we don't inline smi code before these stubs in the
981  // optimizing code generator.
982  if (code->kind() == Code::BINARY_OP_IC ||
983      code->kind() == Code::COMPARE_IC) {
984    __ nop();
985  }
986}
987
988
989void LCodeGen::CallCode(Handle<Code> code,
990                        RelocInfo::Mode mode,
991                        LInstruction* instr) {
992  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
993}
994
995
996void LCodeGen::CallRuntime(const Runtime::Function* fun,
997                           int argc,
998                           LInstruction* instr,
999                           SaveFPRegsMode save_doubles) {
1000  ASSERT(instr != NULL);
1001  ASSERT(instr->HasPointerMap());
1002
1003  __ CallRuntime(fun, argc, save_doubles);
1004
1005  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
1006
1007  ASSERT(info()->is_calling());
1008}
1009
1010
1011void LCodeGen::LoadContextFromDeferred(LOperand* context) {
1012  if (context->IsRegister()) {
1013    if (!ToRegister(context).is(esi)) {
1014      __ mov(esi, ToRegister(context));
1015    }
1016  } else if (context->IsStackSlot()) {
1017    __ mov(esi, ToOperand(context));
1018  } else if (context->IsConstantOperand()) {
1019    HConstant* constant =
1020        chunk_->LookupConstant(LConstantOperand::cast(context));
1021    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
1022  } else {
1023    UNREACHABLE();
1024  }
1025}
1026
1027void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
1028                                       int argc,
1029                                       LInstruction* instr,
1030                                       LOperand* context) {
1031  LoadContextFromDeferred(context);
1032
1033  __ CallRuntimeSaveDoubles(id);
1034  RecordSafepointWithRegisters(
1035      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
1036
1037  ASSERT(info()->is_calling());
1038}
1039
1040
1041void LCodeGen::RegisterEnvironmentForDeoptimization(
1042    LEnvironment* environment, Safepoint::DeoptMode mode) {
1043  if (!environment->HasBeenRegistered()) {
1044    // Physical stack frame layout:
1045    // -x ............. -4  0 ..................................... y
1046    // [incoming arguments] [spill slots] [pushed outgoing arguments]
1047
1048    // Layout of the environment:
1049    // 0 ..................................................... size-1
1050    // [parameters] [locals] [expression stack including arguments]
1051
1052    // Layout of the translation:
1053    // 0 ........................................................ size - 1 + 4
1054    // [expression stack including arguments] [locals] [4 words] [parameters]
1055    // |>------------  translation_size ------------<|
1056
1057    int frame_count = 0;
1058    int jsframe_count = 0;
1059    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
1060      ++frame_count;
1061      if (e->frame_type() == JS_FUNCTION) {
1062        ++jsframe_count;
1063      }
1064    }
1065    Translation translation(&translations_, frame_count, jsframe_count, zone());
1066    WriteTranslation(environment, &translation);
1067    int deoptimization_index = deoptimizations_.length();
1068    int pc_offset = masm()->pc_offset();
1069    environment->Register(deoptimization_index,
1070                          translation.index(),
1071                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
1072    deoptimizations_.Add(environment, zone());
1073  }
1074}
1075
1076
1077void LCodeGen::DeoptimizeIf(Condition cc,
1078                            LEnvironment* environment,
1079                            Deoptimizer::BailoutType bailout_type) {
1080  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
1081  ASSERT(environment->HasBeenRegistered());
1082  int id = environment->deoptimization_index();
1083  ASSERT(info()->IsOptimizing() || info()->IsStub());
1084  Address entry =
1085      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
1086  if (entry == NULL) {
1087    Abort(kBailoutWasNotPrepared);
1088    return;
1089  }
1090
1091  if (DeoptEveryNTimes()) {
1092    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
1093    Label no_deopt;
1094    __ pushfd();
1095    __ push(eax);
1096    __ mov(eax, Operand::StaticVariable(count));
1097    __ sub(eax, Immediate(1));
1098    __ j(not_zero, &no_deopt, Label::kNear);
1099    if (FLAG_trap_on_deopt) __ int3();
1100    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
1101    __ mov(Operand::StaticVariable(count), eax);
1102    __ pop(eax);
1103    __ popfd();
1104    ASSERT(frame_is_built_);
1105    __ call(entry, RelocInfo::RUNTIME_ENTRY);
1106    __ bind(&no_deopt);
1107    __ mov(Operand::StaticVariable(count), eax);
1108    __ pop(eax);
1109    __ popfd();
1110  }
1111
1112  // Before Instructions which can deopt, we normally flush the x87 stack. But
1113  // we can have inputs or outputs of the current instruction on the stack,
1114  // thus we need to flush them here from the physical stack to leave it in a
1115  // consistent state.
1116  if (x87_stack_.depth() > 0) {
1117    Label done;
1118    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
1119    EmitFlushX87ForDeopt();
1120    __ bind(&done);
1121  }
1122
1123  if (info()->ShouldTrapOnDeopt()) {
1124    Label done;
1125    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
1126    __ int3();
1127    __ bind(&done);
1128  }
1129
1130  ASSERT(info()->IsStub() || frame_is_built_);
1131  if (cc == no_condition && frame_is_built_) {
1132    __ call(entry, RelocInfo::RUNTIME_ENTRY);
1133  } else {
1134    // We often have several deopts to the same entry, reuse the last
1135    // jump entry if this is the case.
1136    if (jump_table_.is_empty() ||
1137        jump_table_.last().address != entry ||
1138        jump_table_.last().needs_frame != !frame_is_built_ ||
1139        jump_table_.last().bailout_type != bailout_type) {
1140      Deoptimizer::JumpTableEntry table_entry(entry,
1141                                              bailout_type,
1142                                              !frame_is_built_);
1143      jump_table_.Add(table_entry, zone());
1144    }
1145    if (cc == no_condition) {
1146      __ jmp(&jump_table_.last().label);
1147    } else {
1148      __ j(cc, &jump_table_.last().label);
1149    }
1150  }
1151}
1152
1153
1154void LCodeGen::DeoptimizeIf(Condition cc,
1155                            LEnvironment* environment) {
1156  Deoptimizer::BailoutType bailout_type = info()->IsStub()
1157      ? Deoptimizer::LAZY
1158      : Deoptimizer::EAGER;
1159  DeoptimizeIf(cc, environment, bailout_type);
1160}
1161
1162
1163void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
1164  int length = deoptimizations_.length();
1165  if (length == 0) return;
1166  Handle<DeoptimizationInputData> data =
1167      factory()->NewDeoptimizationInputData(length, TENURED);
1168
1169  Handle<ByteArray> translations =
1170      translations_.CreateByteArray(isolate()->factory());
1171  data->SetTranslationByteArray(*translations);
1172  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
1173
1174  Handle<FixedArray> literals =
1175      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
1176  { AllowDeferredHandleDereference copy_handles;
1177    for (int i = 0; i < deoptimization_literals_.length(); i++) {
1178      literals->set(i, *deoptimization_literals_[i]);
1179    }
1180    data->SetLiteralArray(*literals);
1181  }
1182
1183  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
1184  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
1185
1186  // Populate the deoptimization entries.
1187  for (int i = 0; i < length; i++) {
1188    LEnvironment* env = deoptimizations_[i];
1189    data->SetAstId(i, env->ast_id());
1190    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
1191    data->SetArgumentsStackHeight(i,
1192                                  Smi::FromInt(env->arguments_stack_height()));
1193    data->SetPc(i, Smi::FromInt(env->pc_offset()));
1194  }
1195  code->set_deoptimization_data(*data);
1196}
1197
1198
1199int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
1200  int result = deoptimization_literals_.length();
1201  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
1202    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
1203  }
1204  deoptimization_literals_.Add(literal, zone());
1205  return result;
1206}
1207
1208
1209void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
1210  ASSERT(deoptimization_literals_.length() == 0);
1211
1212  const ZoneList<Handle<JSFunction> >* inlined_closures =
1213      chunk()->inlined_closures();
1214
1215  for (int i = 0, length = inlined_closures->length();
1216       i < length;
1217       i++) {
1218    DefineDeoptimizationLiteral(inlined_closures->at(i));
1219  }
1220
1221  inlined_function_count_ = deoptimization_literals_.length();
1222}
1223
1224
1225void LCodeGen::RecordSafepointWithLazyDeopt(
1226    LInstruction* instr, SafepointMode safepoint_mode) {
1227  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
1228    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
1229  } else {
1230    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
1231    RecordSafepointWithRegisters(
1232        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
1233  }
1234}
1235
1236
1237void LCodeGen::RecordSafepoint(
1238    LPointerMap* pointers,
1239    Safepoint::Kind kind,
1240    int arguments,
1241    Safepoint::DeoptMode deopt_mode) {
1242  ASSERT(kind == expected_safepoint_kind_);
1243  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
1244  Safepoint safepoint =
1245      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
1246  for (int i = 0; i < operands->length(); i++) {
1247    LOperand* pointer = operands->at(i);
1248    if (pointer->IsStackSlot()) {
1249      safepoint.DefinePointerSlot(pointer->index(), zone());
1250    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
1251      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
1252    }
1253  }
1254}
1255
1256
1257void LCodeGen::RecordSafepoint(LPointerMap* pointers,
1258                               Safepoint::DeoptMode mode) {
1259  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
1260}
1261
1262
1263void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
1264  LPointerMap empty_pointers(zone());
1265  RecordSafepoint(&empty_pointers, mode);
1266}
1267
1268
1269void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
1270                                            int arguments,
1271                                            Safepoint::DeoptMode mode) {
1272  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
1273}
1274
1275
1276void LCodeGen::RecordAndWritePosition(int position) {
1277  if (position == RelocInfo::kNoPosition) return;
1278  masm()->positions_recorder()->RecordPosition(position);
1279  masm()->positions_recorder()->WriteRecordedPositions();
1280}
1281
1282
1283static const char* LabelType(LLabel* label) {
1284  if (label->is_loop_header()) return " (loop header)";
1285  if (label->is_osr_entry()) return " (OSR entry)";
1286  return "";
1287}
1288
1289
1290void LCodeGen::DoLabel(LLabel* label) {
1291  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
1292          current_instruction_,
1293          label->hydrogen_value()->id(),
1294          label->block_id(),
1295          LabelType(label));
1296  __ bind(label->label());
1297  current_block_ = label->block_id();
1298  DoGap(label);
1299}
1300
1301
1302void LCodeGen::DoParallelMove(LParallelMove* move) {
1303  resolver_.Resolve(move);
1304}
1305
1306
1307void LCodeGen::DoGap(LGap* gap) {
1308  for (int i = LGap::FIRST_INNER_POSITION;
1309       i <= LGap::LAST_INNER_POSITION;
1310       i++) {
1311    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1312    LParallelMove* move = gap->GetParallelMove(inner_pos);
1313    if (move != NULL) DoParallelMove(move);
1314  }
1315}
1316
1317
1318void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1319  DoGap(instr);
1320}
1321
1322
1323void LCodeGen::DoParameter(LParameter* instr) {
1324  // Nothing to do.
1325}
1326
1327
1328void LCodeGen::DoCallStub(LCallStub* instr) {
1329  ASSERT(ToRegister(instr->context()).is(esi));
1330  ASSERT(ToRegister(instr->result()).is(eax));
1331  switch (instr->hydrogen()->major_key()) {
1332    case CodeStub::RegExpConstructResult: {
1333      RegExpConstructResultStub stub;
1334      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1335      break;
1336    }
1337    case CodeStub::RegExpExec: {
1338      RegExpExecStub stub;
1339      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1340      break;
1341    }
1342    case CodeStub::SubString: {
1343      SubStringStub stub;
1344      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1345      break;
1346    }
1347    case CodeStub::StringCompare: {
1348      StringCompareStub stub;
1349      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1350      break;
1351    }
1352    case CodeStub::TranscendentalCache: {
1353      TranscendentalCacheStub stub(instr->transcendental_type(),
1354                                   TranscendentalCacheStub::TAGGED);
1355      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1356      break;
1357    }
1358    default:
1359      UNREACHABLE();
1360  }
1361}
1362
1363
1364void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1365  GenerateOsrPrologue();
1366}
1367
1368
1369void LCodeGen::DoModI(LModI* instr) {
1370  HMod* hmod = instr->hydrogen();
1371  HValue* left = hmod->left();
1372  HValue* right = hmod->right();
1373  if (hmod->HasPowerOf2Divisor()) {
1374    // TODO(svenpanne) We should really do the strength reduction on the
1375    // Hydrogen level.
1376    Register left_reg = ToRegister(instr->left());
1377    ASSERT(left_reg.is(ToRegister(instr->result())));
1378
1379    // Note: The code below even works when right contains kMinInt.
1380    int32_t divisor = Abs(right->GetInteger32Constant());
1381
1382    Label left_is_not_negative, done;
1383    if (left->CanBeNegative()) {
1384      __ test(left_reg, Operand(left_reg));
1385      __ j(not_sign, &left_is_not_negative, Label::kNear);
1386      __ neg(left_reg);
1387      __ and_(left_reg, divisor - 1);
1388      __ neg(left_reg);
1389      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1390        DeoptimizeIf(zero, instr->environment());
1391      }
1392      __ jmp(&done, Label::kNear);
1393    }
1394
1395    __ bind(&left_is_not_negative);
1396    __ and_(left_reg, divisor - 1);
1397    __ bind(&done);
1398  } else {
1399    Register left_reg = ToRegister(instr->left());
1400    ASSERT(left_reg.is(eax));
1401    Register right_reg = ToRegister(instr->right());
1402    ASSERT(!right_reg.is(eax));
1403    ASSERT(!right_reg.is(edx));
1404    Register result_reg = ToRegister(instr->result());
1405    ASSERT(result_reg.is(edx));
1406
1407    Label done;
1408    // Check for x % 0, idiv would signal a divide error. We have to
1409    // deopt in this case because we can't return a NaN.
1410    if (right->CanBeZero()) {
1411      __ test(right_reg, Operand(right_reg));
1412      DeoptimizeIf(zero, instr->environment());
1413    }
1414
1415    // Check for kMinInt % -1, idiv would signal a divide error. We
1416    // have to deopt if we care about -0, because we can't return that.
1417    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
1418      Label no_overflow_possible;
1419      __ cmp(left_reg, kMinInt);
1420      __ j(not_equal, &no_overflow_possible, Label::kNear);
1421      __ cmp(right_reg, -1);
1422      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1423        DeoptimizeIf(equal, instr->environment());
1424      } else {
1425        __ j(not_equal, &no_overflow_possible, Label::kNear);
1426        __ Set(result_reg, Immediate(0));
1427        __ jmp(&done, Label::kNear);
1428      }
1429      __ bind(&no_overflow_possible);
1430    }
1431
1432    // Sign extend dividend in eax into edx:eax.
1433    __ cdq();
1434
1435    // If we care about -0, test if the dividend is <0 and the result is 0.
1436    if (left->CanBeNegative() &&
1437        hmod->CanBeZero() &&
1438        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1439      Label positive_left;
1440      __ test(left_reg, Operand(left_reg));
1441      __ j(not_sign, &positive_left, Label::kNear);
1442      __ idiv(right_reg);
1443      __ test(result_reg, Operand(result_reg));
1444      DeoptimizeIf(zero, instr->environment());
1445      __ jmp(&done, Label::kNear);
1446      __ bind(&positive_left);
1447    }
1448    __ idiv(right_reg);
1449    __ bind(&done);
1450  }
1451}
1452
1453
1454void LCodeGen::DoDivI(LDivI* instr) {
1455  if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
1456    Register dividend = ToRegister(instr->left());
1457    int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
1458    int32_t test_value = 0;
1459    int32_t power = 0;
1460
1461    if (divisor > 0) {
1462      test_value = divisor - 1;
1463      power = WhichPowerOf2(divisor);
1464    } else {
1465      // Check for (0 / -x) that will produce negative zero.
1466      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1467        __ test(dividend, Operand(dividend));
1468        DeoptimizeIf(zero, instr->environment());
1469      }
1470      // Check for (kMinInt / -1).
1471      if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1472        __ cmp(dividend, kMinInt);
1473        DeoptimizeIf(zero, instr->environment());
1474      }
1475      test_value = - divisor - 1;
1476      power = WhichPowerOf2(-divisor);
1477    }
1478
1479    if (test_value != 0) {
1480      if (instr->hydrogen()->CheckFlag(
1481          HInstruction::kAllUsesTruncatingToInt32)) {
1482        Label done, negative;
1483        __ cmp(dividend, 0);
1484        __ j(less, &negative, Label::kNear);
1485        __ sar(dividend, power);
1486        if (divisor < 0) __ neg(dividend);
1487        __ jmp(&done, Label::kNear);
1488
1489        __ bind(&negative);
1490        __ neg(dividend);
1491        __ sar(dividend, power);
1492        if (divisor > 0) __ neg(dividend);
1493        __ bind(&done);
1494        return;  // Don't fall through to "__ neg" below.
1495      } else {
1496        // Deoptimize if remainder is not 0.
1497        __ test(dividend, Immediate(test_value));
1498        DeoptimizeIf(not_zero, instr->environment());
1499        __ sar(dividend, power);
1500      }
1501    }
1502
1503    if (divisor < 0) __ neg(dividend);
1504
1505    return;
1506  }
1507
1508  LOperand* right = instr->right();
1509  ASSERT(ToRegister(instr->result()).is(eax));
1510  ASSERT(ToRegister(instr->left()).is(eax));
1511  ASSERT(!ToRegister(instr->right()).is(eax));
1512  ASSERT(!ToRegister(instr->right()).is(edx));
1513
1514  Register left_reg = eax;
1515
1516  // Check for x / 0.
1517  Register right_reg = ToRegister(right);
1518  if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1519    __ test(right_reg, ToOperand(right));
1520    DeoptimizeIf(zero, instr->environment());
1521  }
1522
1523  // Check for (0 / -x) that will produce negative zero.
1524  if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1525    Label left_not_zero;
1526    __ test(left_reg, Operand(left_reg));
1527    __ j(not_zero, &left_not_zero, Label::kNear);
1528    __ test(right_reg, ToOperand(right));
1529    DeoptimizeIf(sign, instr->environment());
1530    __ bind(&left_not_zero);
1531  }
1532
1533  // Check for (kMinInt / -1).
1534  if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
1535    Label left_not_min_int;
1536    __ cmp(left_reg, kMinInt);
1537    __ j(not_zero, &left_not_min_int, Label::kNear);
1538    __ cmp(right_reg, -1);
1539    DeoptimizeIf(zero, instr->environment());
1540    __ bind(&left_not_min_int);
1541  }
1542
1543  // Sign extend to edx.
1544  __ cdq();
1545  __ idiv(right_reg);
1546
1547  if (instr->is_flooring()) {
1548    Label done;
1549    __ test(edx, edx);
1550    __ j(zero, &done, Label::kNear);
1551    __ xor_(edx, right_reg);
1552    __ sar(edx, 31);
1553    __ add(eax, edx);
1554    __ bind(&done);
1555  } else if (!instr->hydrogen()->CheckFlag(
1556      HInstruction::kAllUsesTruncatingToInt32)) {
1557    // Deoptimize if remainder is not 0.
1558    __ test(edx, Operand(edx));
1559    DeoptimizeIf(not_zero, instr->environment());
1560  }
1561}
1562
1563
1564void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
1565  ASSERT(instr->right()->IsConstantOperand());
1566
1567  Register dividend = ToRegister(instr->left());
1568  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
1569  Register result = ToRegister(instr->result());
1570
1571  switch (divisor) {
1572  case 0:
1573    DeoptimizeIf(no_condition, instr->environment());
1574    return;
1575
1576  case 1:
1577    __ Move(result, dividend);
1578    return;
1579
1580  case -1:
1581    __ Move(result, dividend);
1582    __ neg(result);
1583    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1584      DeoptimizeIf(zero, instr->environment());
1585    }
1586    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1587      DeoptimizeIf(overflow, instr->environment());
1588    }
1589    return;
1590  }
1591
1592  uint32_t divisor_abs = abs(divisor);
1593  if (IsPowerOf2(divisor_abs)) {
1594    int32_t power = WhichPowerOf2(divisor_abs);
1595    if (divisor < 0) {
1596      // Input[dividend] is clobbered.
1597      // The sequence is tedious because neg(dividend) might overflow.
1598      __ mov(result, dividend);
1599      __ sar(dividend, 31);
1600      __ neg(result);
1601      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1602        DeoptimizeIf(zero, instr->environment());
1603      }
1604      __ shl(dividend, 32 - power);
1605      __ sar(result, power);
1606      __ not_(dividend);
1607      // Clear result.sign if dividend.sign is set.
1608      __ and_(result, dividend);
1609    } else {
1610      __ Move(result, dividend);
1611      __ sar(result, power);
1612    }
1613  } else {
1614    ASSERT(ToRegister(instr->left()).is(eax));
1615    ASSERT(ToRegister(instr->result()).is(edx));
1616    Register scratch = ToRegister(instr->temp());
1617
1618    // Find b which: 2^b < divisor_abs < 2^(b+1).
1619    unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
1620    unsigned shift = 32 + b;  // Precision +1bit (effectively).
1621    double multiplier_f =
1622        static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
1623    int64_t multiplier;
1624    if (multiplier_f - floor(multiplier_f) < 0.5) {
1625        multiplier = static_cast<int64_t>(floor(multiplier_f));
1626    } else {
1627        multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
1628    }
1629    // The multiplier is a uint32.
1630    ASSERT(multiplier > 0 &&
1631           multiplier < (static_cast<int64_t>(1) << 32));
1632    __ mov(scratch, dividend);
1633    if (divisor < 0 &&
1634        instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1635      __ test(dividend, dividend);
1636      DeoptimizeIf(zero, instr->environment());
1637    }
1638    __ mov(edx, static_cast<int32_t>(multiplier));
1639    __ imul(edx);
1640    if (static_cast<int32_t>(multiplier) < 0) {
1641      __ add(edx, scratch);
1642    }
1643    Register reg_lo = eax;
1644    Register reg_byte_scratch = scratch;
1645    if (!reg_byte_scratch.is_byte_register()) {
1646        __ xchg(reg_lo, reg_byte_scratch);
1647        reg_lo = scratch;
1648        reg_byte_scratch = eax;
1649    }
1650    if (divisor < 0) {
1651      __ xor_(reg_byte_scratch, reg_byte_scratch);
1652      __ cmp(reg_lo, 0x40000000);
1653      __ setcc(above, reg_byte_scratch);
1654      __ neg(edx);
1655      __ sub(edx, reg_byte_scratch);
1656    } else {
1657      __ xor_(reg_byte_scratch, reg_byte_scratch);
1658      __ cmp(reg_lo, 0xC0000000);
1659      __ setcc(above_equal, reg_byte_scratch);
1660      __ add(edx, reg_byte_scratch);
1661    }
1662    __ sar(edx, shift - 32);
1663  }
1664}
1665
1666
1667void LCodeGen::DoMulI(LMulI* instr) {
1668  Register left = ToRegister(instr->left());
1669  LOperand* right = instr->right();
1670
1671  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1672    __ mov(ToRegister(instr->temp()), left);
1673  }
1674
1675  if (right->IsConstantOperand()) {
1676    // Try strength reductions on the multiplication.
1677    // All replacement instructions are at most as long as the imul
1678    // and have better latency.
1679    int constant = ToInteger32(LConstantOperand::cast(right));
1680    if (constant == -1) {
1681      __ neg(left);
1682    } else if (constant == 0) {
1683      __ xor_(left, Operand(left));
1684    } else if (constant == 2) {
1685      __ add(left, Operand(left));
1686    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1687      // If we know that the multiplication can't overflow, it's safe to
1688      // use instructions that don't set the overflow flag for the
1689      // multiplication.
1690      switch (constant) {
1691        case 1:
1692          // Do nothing.
1693          break;
1694        case 3:
1695          __ lea(left, Operand(left, left, times_2, 0));
1696          break;
1697        case 4:
1698          __ shl(left, 2);
1699          break;
1700        case 5:
1701          __ lea(left, Operand(left, left, times_4, 0));
1702          break;
1703        case 8:
1704          __ shl(left, 3);
1705          break;
1706        case 9:
1707          __ lea(left, Operand(left, left, times_8, 0));
1708          break;
1709        case 16:
1710          __ shl(left, 4);
1711          break;
1712        default:
1713          __ imul(left, left, constant);
1714          break;
1715      }
1716    } else {
1717      __ imul(left, left, constant);
1718    }
1719  } else {
1720    if (instr->hydrogen()->representation().IsSmi()) {
1721      __ SmiUntag(left);
1722    }
1723    __ imul(left, ToOperand(right));
1724  }
1725
1726  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1727    DeoptimizeIf(overflow, instr->environment());
1728  }
1729
1730  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1731    // Bail out if the result is supposed to be negative zero.
1732    Label done;
1733    __ test(left, Operand(left));
1734    __ j(not_zero, &done, Label::kNear);
1735    if (right->IsConstantOperand()) {
1736      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1737        DeoptimizeIf(no_condition, instr->environment());
1738      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1739        __ cmp(ToRegister(instr->temp()), Immediate(0));
1740        DeoptimizeIf(less, instr->environment());
1741      }
1742    } else {
1743      // Test the non-zero operand for negative sign.
1744      __ or_(ToRegister(instr->temp()), ToOperand(right));
1745      DeoptimizeIf(sign, instr->environment());
1746    }
1747    __ bind(&done);
1748  }
1749}
1750
1751
1752void LCodeGen::DoBitI(LBitI* instr) {
1753  LOperand* left = instr->left();
1754  LOperand* right = instr->right();
1755  ASSERT(left->Equals(instr->result()));
1756  ASSERT(left->IsRegister());
1757
1758  if (right->IsConstantOperand()) {
1759    int32_t right_operand =
1760        ToRepresentation(LConstantOperand::cast(right),
1761                         instr->hydrogen()->representation());
1762    switch (instr->op()) {
1763      case Token::BIT_AND:
1764        __ and_(ToRegister(left), right_operand);
1765        break;
1766      case Token::BIT_OR:
1767        __ or_(ToRegister(left), right_operand);
1768        break;
1769      case Token::BIT_XOR:
1770        if (right_operand == int32_t(~0)) {
1771          __ not_(ToRegister(left));
1772        } else {
1773          __ xor_(ToRegister(left), right_operand);
1774        }
1775        break;
1776      default:
1777        UNREACHABLE();
1778        break;
1779    }
1780  } else {
1781    switch (instr->op()) {
1782      case Token::BIT_AND:
1783        __ and_(ToRegister(left), ToOperand(right));
1784        break;
1785      case Token::BIT_OR:
1786        __ or_(ToRegister(left), ToOperand(right));
1787        break;
1788      case Token::BIT_XOR:
1789        __ xor_(ToRegister(left), ToOperand(right));
1790        break;
1791      default:
1792        UNREACHABLE();
1793        break;
1794    }
1795  }
1796}
1797
1798
1799void LCodeGen::DoShiftI(LShiftI* instr) {
1800  LOperand* left = instr->left();
1801  LOperand* right = instr->right();
1802  ASSERT(left->Equals(instr->result()));
1803  ASSERT(left->IsRegister());
1804  if (right->IsRegister()) {
1805    ASSERT(ToRegister(right).is(ecx));
1806
1807    switch (instr->op()) {
1808      case Token::ROR:
1809        __ ror_cl(ToRegister(left));
1810        if (instr->can_deopt()) {
1811          __ test(ToRegister(left), ToRegister(left));
1812          DeoptimizeIf(sign, instr->environment());
1813        }
1814        break;
1815      case Token::SAR:
1816        __ sar_cl(ToRegister(left));
1817        break;
1818      case Token::SHR:
1819        __ shr_cl(ToRegister(left));
1820        if (instr->can_deopt()) {
1821          __ test(ToRegister(left), ToRegister(left));
1822          DeoptimizeIf(sign, instr->environment());
1823        }
1824        break;
1825      case Token::SHL:
1826        __ shl_cl(ToRegister(left));
1827        break;
1828      default:
1829        UNREACHABLE();
1830        break;
1831    }
1832  } else {
1833    int value = ToInteger32(LConstantOperand::cast(right));
1834    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1835    switch (instr->op()) {
1836      case Token::ROR:
1837        if (shift_count == 0 && instr->can_deopt()) {
1838          __ test(ToRegister(left), ToRegister(left));
1839          DeoptimizeIf(sign, instr->environment());
1840        } else {
1841          __ ror(ToRegister(left), shift_count);
1842        }
1843        break;
1844      case Token::SAR:
1845        if (shift_count != 0) {
1846          __ sar(ToRegister(left), shift_count);
1847        }
1848        break;
1849      case Token::SHR:
1850        if (shift_count == 0 && instr->can_deopt()) {
1851          __ test(ToRegister(left), ToRegister(left));
1852          DeoptimizeIf(sign, instr->environment());
1853        } else {
1854          __ shr(ToRegister(left), shift_count);
1855        }
1856        break;
1857      case Token::SHL:
1858        if (shift_count != 0) {
1859          if (instr->hydrogen_value()->representation().IsSmi() &&
1860              instr->can_deopt()) {
1861            if (shift_count != 1) {
1862              __ shl(ToRegister(left), shift_count - 1);
1863            }
1864            __ SmiTag(ToRegister(left));
1865            DeoptimizeIf(overflow, instr->environment());
1866          } else {
1867            __ shl(ToRegister(left), shift_count);
1868          }
1869        }
1870        break;
1871      default:
1872        UNREACHABLE();
1873        break;
1874    }
1875  }
1876}
1877
1878
1879void LCodeGen::DoSubI(LSubI* instr) {
1880  LOperand* left = instr->left();
1881  LOperand* right = instr->right();
1882  ASSERT(left->Equals(instr->result()));
1883
1884  if (right->IsConstantOperand()) {
1885    __ sub(ToOperand(left),
1886           ToImmediate(right, instr->hydrogen()->representation()));
1887  } else {
1888    __ sub(ToRegister(left), ToOperand(right));
1889  }
1890  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1891    DeoptimizeIf(overflow, instr->environment());
1892  }
1893}
1894
1895
1896void LCodeGen::DoConstantI(LConstantI* instr) {
1897  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
1898}
1899
1900
1901void LCodeGen::DoConstantS(LConstantS* instr) {
1902  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
1903}
1904
1905
1906void LCodeGen::DoConstantD(LConstantD* instr) {
1907  double v = instr->value();
1908  uint64_t int_val = BitCast<uint64_t, double>(v);
1909  int32_t lower = static_cast<int32_t>(int_val);
1910  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
1911  ASSERT(instr->result()->IsDoubleRegister());
1912
1913  if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
1914    __ push(Immediate(upper));
1915    __ push(Immediate(lower));
1916    X87Register reg = ToX87Register(instr->result());
1917    X87Mov(reg, Operand(esp, 0));
1918    __ add(Operand(esp), Immediate(kDoubleSize));
1919  } else {
1920    CpuFeatureScope scope1(masm(), SSE2);
1921    XMMRegister res = ToDoubleRegister(instr->result());
1922    if (int_val == 0) {
1923      __ xorps(res, res);
1924    } else {
1925      Register temp = ToRegister(instr->temp());
1926      if (CpuFeatures::IsSupported(SSE4_1)) {
1927        CpuFeatureScope scope2(masm(), SSE4_1);
1928        if (lower != 0) {
1929          __ Set(temp, Immediate(lower));
1930          __ movd(res, Operand(temp));
1931          __ Set(temp, Immediate(upper));
1932          __ pinsrd(res, Operand(temp), 1);
1933        } else {
1934          __ xorps(res, res);
1935          __ Set(temp, Immediate(upper));
1936          __ pinsrd(res, Operand(temp), 1);
1937        }
1938      } else {
1939        __ Set(temp, Immediate(upper));
1940        __ movd(res, Operand(temp));
1941        __ psllq(res, 32);
1942        if (lower != 0) {
1943          XMMRegister xmm_scratch = double_scratch0();
1944          __ Set(temp, Immediate(lower));
1945          __ movd(xmm_scratch, Operand(temp));
1946          __ orps(res, xmm_scratch);
1947        }
1948      }
1949    }
1950  }
1951}
1952
1953
1954void LCodeGen::DoConstantE(LConstantE* instr) {
1955  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
1956}
1957
1958
1959void LCodeGen::DoConstantT(LConstantT* instr) {
1960  Register reg = ToRegister(instr->result());
1961  Handle<Object> handle = instr->value(isolate());
1962  AllowDeferredHandleDereference smi_check;
1963  __ LoadObject(reg, handle);
1964}
1965
1966
1967void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1968  Register result = ToRegister(instr->result());
1969  Register map = ToRegister(instr->value());
1970  __ EnumLength(result, map);
1971}
1972
1973
1974void LCodeGen::DoElementsKind(LElementsKind* instr) {
1975  Register result = ToRegister(instr->result());
1976  Register input = ToRegister(instr->value());
1977
1978  // Load map into |result|.
1979  __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
1980  // Load the map's "bit field 2" into |result|. We only need the first byte,
1981  // but the following masking takes care of that anyway.
1982  __ mov(result, FieldOperand(result, Map::kBitField2Offset));
1983  // Retrieve elements_kind from bit field 2.
1984  __ and_(result, Map::kElementsKindMask);
1985  __ shr(result, Map::kElementsKindShift);
1986}
1987
1988
1989void LCodeGen::DoValueOf(LValueOf* instr) {
1990  Register input = ToRegister(instr->value());
1991  Register result = ToRegister(instr->result());
1992  Register map = ToRegister(instr->temp());
1993  ASSERT(input.is(result));
1994
1995  Label done;
1996
1997  if (!instr->hydrogen()->value()->IsHeapObject()) {
1998    // If the object is a smi return the object.
1999    __ JumpIfSmi(input, &done, Label::kNear);
2000  }
2001
2002  // If the object is not a value type, return the object.
2003  __ CmpObjectType(input, JS_VALUE_TYPE, map);
2004  __ j(not_equal, &done, Label::kNear);
2005  __ mov(result, FieldOperand(input, JSValue::kValueOffset));
2006
2007  __ bind(&done);
2008}
2009
2010
2011void LCodeGen::DoDateField(LDateField* instr) {
2012  Register object = ToRegister(instr->date());
2013  Register result = ToRegister(instr->result());
2014  Register scratch = ToRegister(instr->temp());
2015  Smi* index = instr->index();
2016  Label runtime, done;
2017  ASSERT(object.is(result));
2018  ASSERT(object.is(eax));
2019
2020  __ test(object, Immediate(kSmiTagMask));
2021  DeoptimizeIf(zero, instr->environment());
2022  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
2023  DeoptimizeIf(not_equal, instr->environment());
2024
2025  if (index->value() == 0) {
2026    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
2027  } else {
2028    if (index->value() < JSDate::kFirstUncachedField) {
2029      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2030      __ mov(scratch, Operand::StaticVariable(stamp));
2031      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
2032      __ j(not_equal, &runtime, Label::kNear);
2033      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
2034                                          kPointerSize * index->value()));
2035      __ jmp(&done, Label::kNear);
2036    }
2037    __ bind(&runtime);
2038    __ PrepareCallCFunction(2, scratch);
2039    __ mov(Operand(esp, 0), object);
2040    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
2041    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2042    __ bind(&done);
2043  }
2044}
2045
2046
2047Operand LCodeGen::BuildSeqStringOperand(Register string,
2048                                        LOperand* index,
2049                                        String::Encoding encoding) {
2050  if (index->IsConstantOperand()) {
2051    int offset = ToRepresentation(LConstantOperand::cast(index),
2052                                  Representation::Integer32());
2053    if (encoding == String::TWO_BYTE_ENCODING) {
2054      offset *= kUC16Size;
2055    }
2056    STATIC_ASSERT(kCharSize == 1);
2057    return FieldOperand(string, SeqString::kHeaderSize + offset);
2058  }
2059  return FieldOperand(
2060      string, ToRegister(index),
2061      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
2062      SeqString::kHeaderSize);
2063}
2064
2065
2066void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
2067  String::Encoding encoding = instr->hydrogen()->encoding();
2068  Register result = ToRegister(instr->result());
2069  Register string = ToRegister(instr->string());
2070
2071  if (FLAG_debug_code) {
2072    __ push(string);
2073    __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
2074    __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
2075
2076    __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
2077    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2078    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2079    __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
2080                             ? one_byte_seq_type : two_byte_seq_type));
2081    __ Check(equal, kUnexpectedStringType);
2082    __ pop(string);
2083  }
2084
2085  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2086  if (encoding == String::ONE_BYTE_ENCODING) {
2087    __ movzx_b(result, operand);
2088  } else {
2089    __ movzx_w(result, operand);
2090  }
2091}
2092
2093
2094void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2095  String::Encoding encoding = instr->hydrogen()->encoding();
2096  Register string = ToRegister(instr->string());
2097
2098  if (FLAG_debug_code) {
2099    Register value = ToRegister(instr->value());
2100    Register index = ToRegister(instr->index());
2101    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2102    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2103    int encoding_mask =
2104        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2105        ? one_byte_seq_type : two_byte_seq_type;
2106    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2107  }
2108
2109  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2110  if (instr->value()->IsConstantOperand()) {
2111    int value = ToRepresentation(LConstantOperand::cast(instr->value()),
2112                                 Representation::Integer32());
2113    ASSERT_LE(0, value);
2114    if (encoding == String::ONE_BYTE_ENCODING) {
2115      ASSERT_LE(value, String::kMaxOneByteCharCode);
2116      __ mov_b(operand, static_cast<int8_t>(value));
2117    } else {
2118      ASSERT_LE(value, String::kMaxUtf16CodeUnit);
2119      __ mov_w(operand, static_cast<int16_t>(value));
2120    }
2121  } else {
2122    Register value = ToRegister(instr->value());
2123    if (encoding == String::ONE_BYTE_ENCODING) {
2124      __ mov_b(operand, value);
2125    } else {
2126      __ mov_w(operand, value);
2127    }
2128  }
2129}
2130
2131
2132void LCodeGen::DoThrow(LThrow* instr) {
2133  __ push(ToOperand(instr->value()));
2134  ASSERT(ToRegister(instr->context()).is(esi));
2135  CallRuntime(Runtime::kThrow, 1, instr);
2136
2137  if (FLAG_debug_code) {
2138    Comment("Unreachable code.");
2139    __ int3();
2140  }
2141}
2142
2143
2144void LCodeGen::DoAddI(LAddI* instr) {
2145  LOperand* left = instr->left();
2146  LOperand* right = instr->right();
2147
2148  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
2149    if (right->IsConstantOperand()) {
2150      int32_t offset = ToRepresentation(LConstantOperand::cast(right),
2151                                        instr->hydrogen()->representation());
2152      __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
2153    } else {
2154      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
2155      __ lea(ToRegister(instr->result()), address);
2156    }
2157  } else {
2158    if (right->IsConstantOperand()) {
2159      __ add(ToOperand(left),
2160             ToImmediate(right, instr->hydrogen()->representation()));
2161    } else {
2162      __ add(ToRegister(left), ToOperand(right));
2163    }
2164    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
2165      DeoptimizeIf(overflow, instr->environment());
2166    }
2167  }
2168}
2169
2170
2171void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2172  CpuFeatureScope scope(masm(), SSE2);
2173  LOperand* left = instr->left();
2174  LOperand* right = instr->right();
2175  ASSERT(left->Equals(instr->result()));
2176  HMathMinMax::Operation operation = instr->hydrogen()->operation();
2177  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2178    Label return_left;
2179    Condition condition = (operation == HMathMinMax::kMathMin)
2180        ? less_equal
2181        : greater_equal;
2182    if (right->IsConstantOperand()) {
2183      Operand left_op = ToOperand(left);
2184      Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
2185                                        instr->hydrogen()->representation());
2186      __ cmp(left_op, immediate);
2187      __ j(condition, &return_left, Label::kNear);
2188      __ mov(left_op, immediate);
2189    } else {
2190      Register left_reg = ToRegister(left);
2191      Operand right_op = ToOperand(right);
2192      __ cmp(left_reg, right_op);
2193      __ j(condition, &return_left, Label::kNear);
2194      __ mov(left_reg, right_op);
2195    }
2196    __ bind(&return_left);
2197  } else {
2198    ASSERT(instr->hydrogen()->representation().IsDouble());
2199    Label check_nan_left, check_zero, return_left, return_right;
2200    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
2201    XMMRegister left_reg = ToDoubleRegister(left);
2202    XMMRegister right_reg = ToDoubleRegister(right);
2203    __ ucomisd(left_reg, right_reg);
2204    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
2205    __ j(equal, &check_zero, Label::kNear);  // left == right.
2206    __ j(condition, &return_left, Label::kNear);
2207    __ jmp(&return_right, Label::kNear);
2208
2209    __ bind(&check_zero);
2210    XMMRegister xmm_scratch = double_scratch0();
2211    __ xorps(xmm_scratch, xmm_scratch);
2212    __ ucomisd(left_reg, xmm_scratch);
2213    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
2214    // At this point, both left and right are either 0 or -0.
2215    if (operation == HMathMinMax::kMathMin) {
2216      __ orpd(left_reg, right_reg);
2217    } else {
2218      // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
2219      __ addsd(left_reg, right_reg);
2220    }
2221    __ jmp(&return_left, Label::kNear);
2222
2223    __ bind(&check_nan_left);
2224    __ ucomisd(left_reg, left_reg);  // NaN check.
2225    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
2226    __ bind(&return_right);
2227    __ movaps(left_reg, right_reg);
2228
2229    __ bind(&return_left);
2230  }
2231}
2232
2233
2234void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2235  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2236    CpuFeatureScope scope(masm(), SSE2);
2237    XMMRegister left = ToDoubleRegister(instr->left());
2238    XMMRegister right = ToDoubleRegister(instr->right());
2239    XMMRegister result = ToDoubleRegister(instr->result());
2240    switch (instr->op()) {
2241      case Token::ADD:
2242        __ addsd(left, right);
2243        break;
2244      case Token::SUB:
2245        __ subsd(left, right);
2246        break;
2247      case Token::MUL:
2248        __ mulsd(left, right);
2249        break;
2250      case Token::DIV:
2251        __ divsd(left, right);
2252        // Don't delete this mov. It may improve performance on some CPUs,
2253        // when there is a mulsd depending on the result
2254        __ movaps(left, left);
2255        break;
2256      case Token::MOD: {
2257        // Pass two doubles as arguments on the stack.
2258        __ PrepareCallCFunction(4, eax);
2259        __ movsd(Operand(esp, 0 * kDoubleSize), left);
2260        __ movsd(Operand(esp, 1 * kDoubleSize), right);
2261        __ CallCFunction(
2262            ExternalReference::double_fp_operation(Token::MOD, isolate()),
2263            4);
2264
2265        // Return value is in st(0) on ia32.
2266        // Store it into the result register.
2267        __ sub(Operand(esp), Immediate(kDoubleSize));
2268        __ fstp_d(Operand(esp, 0));
2269        __ movsd(result, Operand(esp, 0));
2270        __ add(Operand(esp), Immediate(kDoubleSize));
2271        break;
2272      }
2273      default:
2274        UNREACHABLE();
2275        break;
2276    }
2277  } else {
2278    X87Register left = ToX87Register(instr->left());
2279    X87Register right = ToX87Register(instr->right());
2280    X87Register result = ToX87Register(instr->result());
2281    if (instr->op() != Token::MOD) {
2282      X87PrepareBinaryOp(left, right, result);
2283    }
2284    switch (instr->op()) {
2285      case Token::ADD:
2286        __ fadd_i(1);
2287        break;
2288      case Token::SUB:
2289        __ fsub_i(1);
2290        break;
2291      case Token::MUL:
2292        __ fmul_i(1);
2293        break;
2294      case Token::DIV:
2295        __ fdiv_i(1);
2296        break;
2297      case Token::MOD: {
2298        // Pass two doubles as arguments on the stack.
2299        __ PrepareCallCFunction(4, eax);
2300        X87Mov(Operand(esp, 1 * kDoubleSize), right);
2301        X87Mov(Operand(esp, 0), left);
2302        X87Free(right);
2303        ASSERT(left.is(result));
2304        X87PrepareToWrite(result);
2305        __ CallCFunction(
2306            ExternalReference::double_fp_operation(Token::MOD, isolate()),
2307            4);
2308
2309        // Return value is in st(0) on ia32.
2310        X87CommitWrite(result);
2311        break;
2312      }
2313      default:
2314        UNREACHABLE();
2315        break;
2316    }
2317  }
2318}
2319
2320
2321void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2322  ASSERT(ToRegister(instr->context()).is(esi));
2323  ASSERT(ToRegister(instr->left()).is(edx));
2324  ASSERT(ToRegister(instr->right()).is(eax));
2325  ASSERT(ToRegister(instr->result()).is(eax));
2326
2327  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
2328  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2329  __ nop();  // Signals no inlined code.
2330}
2331
2332
2333template<class InstrType>
2334void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2335  int left_block = instr->TrueDestination(chunk_);
2336  int right_block = instr->FalseDestination(chunk_);
2337
2338  int next_block = GetNextEmittedBlock();
2339
2340  if (right_block == left_block || cc == no_condition) {
2341    EmitGoto(left_block);
2342  } else if (left_block == next_block) {
2343    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
2344  } else if (right_block == next_block) {
2345    __ j(cc, chunk_->GetAssemblyLabel(left_block));
2346  } else {
2347    __ j(cc, chunk_->GetAssemblyLabel(left_block));
2348    __ jmp(chunk_->GetAssemblyLabel(right_block));
2349  }
2350}
2351
2352
2353template<class InstrType>
2354void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
2355  int false_block = instr->FalseDestination(chunk_);
2356  if (cc == no_condition) {
2357    __ jmp(chunk_->GetAssemblyLabel(false_block));
2358  } else {
2359    __ j(cc, chunk_->GetAssemblyLabel(false_block));
2360  }
2361}
2362
2363
2364void LCodeGen::DoBranch(LBranch* instr) {
2365  Representation r = instr->hydrogen()->value()->representation();
2366  if (r.IsSmiOrInteger32()) {
2367    Register reg = ToRegister(instr->value());
2368    __ test(reg, Operand(reg));
2369    EmitBranch(instr, not_zero);
2370  } else if (r.IsDouble()) {
2371    ASSERT(!info()->IsStub());
2372    CpuFeatureScope scope(masm(), SSE2);
2373    XMMRegister reg = ToDoubleRegister(instr->value());
2374    XMMRegister xmm_scratch = double_scratch0();
2375    __ xorps(xmm_scratch, xmm_scratch);
2376    __ ucomisd(reg, xmm_scratch);
2377    EmitBranch(instr, not_equal);
2378  } else {
2379    ASSERT(r.IsTagged());
2380    Register reg = ToRegister(instr->value());
2381    HType type = instr->hydrogen()->value()->type();
2382    if (type.IsBoolean()) {
2383      ASSERT(!info()->IsStub());
2384      __ cmp(reg, factory()->true_value());
2385      EmitBranch(instr, equal);
2386    } else if (type.IsSmi()) {
2387      ASSERT(!info()->IsStub());
2388      __ test(reg, Operand(reg));
2389      EmitBranch(instr, not_equal);
2390    } else if (type.IsJSArray()) {
2391      ASSERT(!info()->IsStub());
2392      EmitBranch(instr, no_condition);
2393    } else if (type.IsHeapNumber()) {
2394      ASSERT(!info()->IsStub());
2395      CpuFeatureScope scope(masm(), SSE2);
2396      XMMRegister xmm_scratch = double_scratch0();
2397      __ xorps(xmm_scratch, xmm_scratch);
2398      __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2399      EmitBranch(instr, not_equal);
2400    } else if (type.IsString()) {
2401      ASSERT(!info()->IsStub());
2402      __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2403      EmitBranch(instr, not_equal);
2404    } else {
2405      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2406      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2407
2408      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2409        // undefined -> false.
2410        __ cmp(reg, factory()->undefined_value());
2411        __ j(equal, instr->FalseLabel(chunk_));
2412      }
2413      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2414        // true -> true.
2415        __ cmp(reg, factory()->true_value());
2416        __ j(equal, instr->TrueLabel(chunk_));
2417        // false -> false.
2418        __ cmp(reg, factory()->false_value());
2419        __ j(equal, instr->FalseLabel(chunk_));
2420      }
2421      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2422        // 'null' -> false.
2423        __ cmp(reg, factory()->null_value());
2424        __ j(equal, instr->FalseLabel(chunk_));
2425      }
2426
2427      if (expected.Contains(ToBooleanStub::SMI)) {
2428        // Smis: 0 -> false, all other -> true.
2429        __ test(reg, Operand(reg));
2430        __ j(equal, instr->FalseLabel(chunk_));
2431        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2432      } else if (expected.NeedsMap()) {
2433        // If we need a map later and have a Smi -> deopt.
2434        __ test(reg, Immediate(kSmiTagMask));
2435        DeoptimizeIf(zero, instr->environment());
2436      }
2437
2438      Register map = no_reg;  // Keep the compiler happy.
2439      if (expected.NeedsMap()) {
2440        map = ToRegister(instr->temp());
2441        ASSERT(!map.is(reg));
2442        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
2443
2444        if (expected.CanBeUndetectable()) {
2445          // Undetectable -> false.
2446          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2447                    1 << Map::kIsUndetectable);
2448          __ j(not_zero, instr->FalseLabel(chunk_));
2449        }
2450      }
2451
2452      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2453        // spec object -> true.
2454        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
2455        __ j(above_equal, instr->TrueLabel(chunk_));
2456      }
2457
2458      if (expected.Contains(ToBooleanStub::STRING)) {
2459        // String value -> false iff empty.
2460        Label not_string;
2461        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2462        __ j(above_equal, &not_string, Label::kNear);
2463        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2464        __ j(not_zero, instr->TrueLabel(chunk_));
2465        __ jmp(instr->FalseLabel(chunk_));
2466        __ bind(&not_string);
2467      }
2468
2469      if (expected.Contains(ToBooleanStub::SYMBOL)) {
2470        // Symbol value -> true.
2471        __ CmpInstanceType(map, SYMBOL_TYPE);
2472        __ j(equal, instr->TrueLabel(chunk_));
2473      }
2474
2475      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2476        // heap number -> false iff +0, -0, or NaN.
2477        Label not_heap_number;
2478        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2479               factory()->heap_number_map());
2480        __ j(not_equal, &not_heap_number, Label::kNear);
2481        if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2482          CpuFeatureScope scope(masm(), SSE2);
2483          XMMRegister xmm_scratch = double_scratch0();
2484          __ xorps(xmm_scratch, xmm_scratch);
2485          __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2486        } else {
2487          __ fldz();
2488          __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
2489          __ FCmp();
2490        }
2491        __ j(zero, instr->FalseLabel(chunk_));
2492        __ jmp(instr->TrueLabel(chunk_));
2493        __ bind(&not_heap_number);
2494      }
2495
2496      if (!expected.IsGeneric()) {
2497        // We've seen something for the first time -> deopt.
2498        // This can only happen if we are not generic already.
2499        DeoptimizeIf(no_condition, instr->environment());
2500      }
2501    }
2502  }
2503}
2504
2505
2506void LCodeGen::EmitGoto(int block) {
2507  if (!IsNextEmittedBlock(block)) {
2508    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2509  }
2510}
2511
2512
2513void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
2514}
2515
2516
2517void LCodeGen::DoGoto(LGoto* instr) {
2518  EmitGoto(instr->block_id());
2519}
2520
2521
2522Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2523  Condition cond = no_condition;
2524  switch (op) {
2525    case Token::EQ:
2526    case Token::EQ_STRICT:
2527      cond = equal;
2528      break;
2529    case Token::NE:
2530    case Token::NE_STRICT:
2531      cond = not_equal;
2532      break;
2533    case Token::LT:
2534      cond = is_unsigned ? below : less;
2535      break;
2536    case Token::GT:
2537      cond = is_unsigned ? above : greater;
2538      break;
2539    case Token::LTE:
2540      cond = is_unsigned ? below_equal : less_equal;
2541      break;
2542    case Token::GTE:
2543      cond = is_unsigned ? above_equal : greater_equal;
2544      break;
2545    case Token::IN:
2546    case Token::INSTANCEOF:
2547    default:
2548      UNREACHABLE();
2549  }
2550  return cond;
2551}
2552
2553
2554void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2555  LOperand* left = instr->left();
2556  LOperand* right = instr->right();
2557  Condition cc = TokenToCondition(instr->op(), instr->is_double());
2558
2559  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2560    // We can statically evaluate the comparison.
2561    double left_val = ToDouble(LConstantOperand::cast(left));
2562    double right_val = ToDouble(LConstantOperand::cast(right));
2563    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2564        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2565    EmitGoto(next_block);
2566  } else {
2567    if (instr->is_double()) {
2568      if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
2569        CpuFeatureScope scope(masm(), SSE2);
2570        __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2571      } else {
2572        X87LoadForUsage(ToX87Register(right), ToX87Register(left));
2573        __ FCmp();
2574      }
2575      // Don't base result on EFLAGS when a NaN is involved. Instead
2576      // jump to the false block.
2577      __ j(parity_even, instr->FalseLabel(chunk_));
2578    } else {
2579      if (right->IsConstantOperand()) {
2580        __ cmp(ToOperand(left),
2581               ToImmediate(right, instr->hydrogen()->representation()));
2582      } else if (left->IsConstantOperand()) {
2583        __ cmp(ToOperand(right),
2584               ToImmediate(left, instr->hydrogen()->representation()));
2585        // We transposed the operands. Reverse the condition.
2586        cc = ReverseCondition(cc);
2587      } else {
2588        __ cmp(ToRegister(left), ToOperand(right));
2589      }
2590    }
2591    EmitBranch(instr, cc);
2592  }
2593}
2594
2595
2596void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2597  Register left = ToRegister(instr->left());
2598
2599  if (instr->right()->IsConstantOperand()) {
2600    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2601    __ CmpObject(left, right);
2602  } else {
2603    Operand right = ToOperand(instr->right());
2604    __ cmp(left, right);
2605  }
2606  EmitBranch(instr, equal);
2607}
2608
2609
2610void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2611  if (instr->hydrogen()->representation().IsTagged()) {
2612    Register input_reg = ToRegister(instr->object());
2613    __ cmp(input_reg, factory()->the_hole_value());
2614    EmitBranch(instr, equal);
2615    return;
2616  }
2617
2618  bool use_sse2 = CpuFeatures::IsSupported(SSE2);
2619  if (use_sse2) {
2620    CpuFeatureScope scope(masm(), SSE2);
2621    XMMRegister input_reg = ToDoubleRegister(instr->object());
2622    __ ucomisd(input_reg, input_reg);
2623    EmitFalseBranch(instr, parity_odd);
2624  } else {
2625    // Put the value to the top of stack
2626    X87Register src = ToX87Register(instr->object());
2627    X87LoadForUsage(src);
2628    __ fld(0);
2629    __ fld(0);
2630    __ FCmp();
2631    Label ok;
2632    __ j(parity_even, &ok, Label::kNear);
2633    __ fstp(0);
2634    EmitFalseBranch(instr, no_condition);
2635    __ bind(&ok);
2636  }
2637
2638
2639  __ sub(esp, Immediate(kDoubleSize));
2640  if (use_sse2) {
2641    CpuFeatureScope scope(masm(), SSE2);
2642    XMMRegister input_reg = ToDoubleRegister(instr->object());
2643    __ movsd(MemOperand(esp, 0), input_reg);
2644  } else {
2645    __ fstp_d(MemOperand(esp, 0));
2646  }
2647
2648  __ add(esp, Immediate(kDoubleSize));
2649  int offset = sizeof(kHoleNanUpper32);
2650  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
2651  EmitBranch(instr, equal);
2652}
2653
2654
2655void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2656  Representation rep = instr->hydrogen()->value()->representation();
2657  ASSERT(!rep.IsInteger32());
2658  Register scratch = ToRegister(instr->temp());
2659
2660  if (rep.IsDouble()) {
2661    CpuFeatureScope use_sse2(masm(), SSE2);
2662    XMMRegister value = ToDoubleRegister(instr->value());
2663    XMMRegister xmm_scratch = double_scratch0();
2664    __ xorps(xmm_scratch, xmm_scratch);
2665    __ ucomisd(xmm_scratch, value);
2666    EmitFalseBranch(instr, not_equal);
2667    __ movmskpd(scratch, value);
2668    __ test(scratch, Immediate(1));
2669    EmitBranch(instr, not_zero);
2670  } else {
2671    Register value = ToRegister(instr->value());
2672    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
2673    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
2674    __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
2675           Immediate(0x80000000));
2676    EmitFalseBranch(instr, not_equal);
2677    __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
2678           Immediate(0x00000000));
2679    EmitBranch(instr, equal);
2680  }
2681}
2682
2683
2684Condition LCodeGen::EmitIsObject(Register input,
2685                                 Register temp1,
2686                                 Label* is_not_object,
2687                                 Label* is_object) {
2688  __ JumpIfSmi(input, is_not_object);
2689
2690  __ cmp(input, isolate()->factory()->null_value());
2691  __ j(equal, is_object);
2692
2693  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
2694  // Undetectable objects behave like undefined.
2695  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
2696            1 << Map::kIsUndetectable);
2697  __ j(not_zero, is_not_object);
2698
2699  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
2700  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
2701  __ j(below, is_not_object);
2702  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
2703  return below_equal;
2704}
2705
2706
2707void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2708  Register reg = ToRegister(instr->value());
2709  Register temp = ToRegister(instr->temp());
2710
2711  Condition true_cond = EmitIsObject(
2712      reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2713
2714  EmitBranch(instr, true_cond);
2715}
2716
2717
2718Condition LCodeGen::EmitIsString(Register input,
2719                                 Register temp1,
2720                                 Label* is_not_string,
2721                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
2722  if (check_needed == INLINE_SMI_CHECK) {
2723    __ JumpIfSmi(input, is_not_string);
2724  }
2725
2726  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2727
2728  return cond;
2729}
2730
2731
2732void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2733  Register reg = ToRegister(instr->value());
2734  Register temp = ToRegister(instr->temp());
2735
2736  SmiCheck check_needed =
2737      instr->hydrogen()->value()->IsHeapObject()
2738          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2739
2740  Condition true_cond = EmitIsString(
2741      reg, temp, instr->FalseLabel(chunk_), check_needed);
2742
2743  EmitBranch(instr, true_cond);
2744}
2745
2746
2747void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2748  Operand input = ToOperand(instr->value());
2749
2750  __ test(input, Immediate(kSmiTagMask));
2751  EmitBranch(instr, zero);
2752}
2753
2754
2755void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2756  Register input = ToRegister(instr->value());
2757  Register temp = ToRegister(instr->temp());
2758
2759  if (!instr->hydrogen()->value()->IsHeapObject()) {
2760    STATIC_ASSERT(kSmiTag == 0);
2761    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2762  }
2763  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2764  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
2765            1 << Map::kIsUndetectable);
2766  EmitBranch(instr, not_zero);
2767}
2768
2769
2770static Condition ComputeCompareCondition(Token::Value op) {
2771  switch (op) {
2772    case Token::EQ_STRICT:
2773    case Token::EQ:
2774      return equal;
2775    case Token::LT:
2776      return less;
2777    case Token::GT:
2778      return greater;
2779    case Token::LTE:
2780      return less_equal;
2781    case Token::GTE:
2782      return greater_equal;
2783    default:
2784      UNREACHABLE();
2785      return no_condition;
2786  }
2787}
2788
2789
2790void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2791  Token::Value op = instr->op();
2792
2793  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2794  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2795
2796  Condition condition = ComputeCompareCondition(op);
2797  __ test(eax, Operand(eax));
2798
2799  EmitBranch(instr, condition);
2800}
2801
2802
2803static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2804  InstanceType from = instr->from();
2805  InstanceType to = instr->to();
2806  if (from == FIRST_TYPE) return to;
2807  ASSERT(from == to || to == LAST_TYPE);
2808  return from;
2809}
2810
2811
2812static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2813  InstanceType from = instr->from();
2814  InstanceType to = instr->to();
2815  if (from == to) return equal;
2816  if (to == LAST_TYPE) return above_equal;
2817  if (from == FIRST_TYPE) return below_equal;
2818  UNREACHABLE();
2819  return equal;
2820}
2821
2822
2823void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2824  Register input = ToRegister(instr->value());
2825  Register temp = ToRegister(instr->temp());
2826
2827  if (!instr->hydrogen()->value()->IsHeapObject()) {
2828    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2829  }
2830
2831  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
2832  EmitBranch(instr, BranchCondition(instr->hydrogen()));
2833}
2834
2835
2836void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2837  Register input = ToRegister(instr->value());
2838  Register result = ToRegister(instr->result());
2839
2840  __ AssertString(input);
2841
2842  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
2843  __ IndexFromHash(result, result);
2844}
2845
2846
2847void LCodeGen::DoHasCachedArrayIndexAndBranch(
2848    LHasCachedArrayIndexAndBranch* instr) {
2849  Register input = ToRegister(instr->value());
2850
2851  __ test(FieldOperand(input, String::kHashFieldOffset),
2852          Immediate(String::kContainsCachedArrayIndexMask));
2853  EmitBranch(instr, equal);
2854}
2855
2856
2857// Branches to a label or falls through with the answer in the z flag.  Trashes
2858// the temp registers, but not the input.
2859void LCodeGen::EmitClassOfTest(Label* is_true,
2860                               Label* is_false,
2861                               Handle<String>class_name,
2862                               Register input,
2863                               Register temp,
2864                               Register temp2) {
2865  ASSERT(!input.is(temp));
2866  ASSERT(!input.is(temp2));
2867  ASSERT(!temp.is(temp2));
2868  __ JumpIfSmi(input, is_false);
2869
2870  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2871    // Assuming the following assertions, we can use the same compares to test
2872    // for both being a function type and being in the object type range.
2873    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2874    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2875                  FIRST_SPEC_OBJECT_TYPE + 1);
2876    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2877                  LAST_SPEC_OBJECT_TYPE - 1);
2878    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2879    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
2880    __ j(below, is_false);
2881    __ j(equal, is_true);
2882    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
2883    __ j(equal, is_true);
2884  } else {
2885    // Faster code path to avoid two compares: subtract lower bound from the
2886    // actual type and do a signed compare with the width of the type range.
2887    __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2888    __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
2889    __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2890    __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2891                                     FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2892    __ j(above, is_false);
2893  }
2894
2895  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2896  // Check if the constructor in the map is a function.
2897  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
2898  // Objects with a non-function constructor have class 'Object'.
2899  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
2900  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2901    __ j(not_equal, is_true);
2902  } else {
2903    __ j(not_equal, is_false);
2904  }
2905
2906  // temp now contains the constructor function. Grab the
2907  // instance class name from there.
2908  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2909  __ mov(temp, FieldOperand(temp,
2910                            SharedFunctionInfo::kInstanceClassNameOffset));
2911  // The class name we are testing against is internalized since it's a literal.
2912  // The name in the constructor is internalized because of the way the context
2913  // is booted.  This routine isn't expected to work for random API-created
2914  // classes and it doesn't have to because you can't access it with natives
2915  // syntax.  Since both sides are internalized it is sufficient to use an
2916  // identity comparison.
2917  __ cmp(temp, class_name);
2918  // End with the answer in the z flag.
2919}
2920
2921
2922void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2923  Register input = ToRegister(instr->value());
2924  Register temp = ToRegister(instr->temp());
2925  Register temp2 = ToRegister(instr->temp2());
2926
2927  Handle<String> class_name = instr->hydrogen()->class_name();
2928
2929  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2930      class_name, input, temp, temp2);
2931
2932  EmitBranch(instr, equal);
2933}
2934
2935
2936void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2937  Register reg = ToRegister(instr->value());
2938  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2939  EmitBranch(instr, equal);
2940}
2941
2942
2943void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2944  // Object and function are in fixed registers defined by the stub.
2945  ASSERT(ToRegister(instr->context()).is(esi));
2946  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2947  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2948
2949  Label true_value, done;
2950  __ test(eax, Operand(eax));
2951  __ j(zero, &true_value, Label::kNear);
2952  __ mov(ToRegister(instr->result()), factory()->false_value());
2953  __ jmp(&done, Label::kNear);
2954  __ bind(&true_value);
2955  __ mov(ToRegister(instr->result()), factory()->true_value());
2956  __ bind(&done);
2957}
2958
2959
2960void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2961  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2962   public:
2963    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2964                                  LInstanceOfKnownGlobal* instr,
2965                                  const X87Stack& x87_stack)
2966        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
2967    virtual void Generate() V8_OVERRIDE {
2968      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2969    }
2970    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2971    Label* map_check() { return &map_check_; }
2972   private:
2973    LInstanceOfKnownGlobal* instr_;
2974    Label map_check_;
2975  };
2976
2977  DeferredInstanceOfKnownGlobal* deferred;
2978  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
2979
2980  Label done, false_result;
2981  Register object = ToRegister(instr->value());
2982  Register temp = ToRegister(instr->temp());
2983
2984  // A Smi is not an instance of anything.
2985  __ JumpIfSmi(object, &false_result, Label::kNear);
2986
2987  // This is the inlined call site instanceof cache. The two occurences of the
2988  // hole value will be patched to the last map/result pair generated by the
2989  // instanceof stub.
2990  Label cache_miss;
2991  Register map = ToRegister(instr->temp());
2992  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
2993  __ bind(deferred->map_check());  // Label for calculating code patching.
2994  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2995  __ cmp(map, Operand::ForCell(cache_cell));  // Patched to cached map.
2996  __ j(not_equal, &cache_miss, Label::kNear);
2997  __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
2998  __ jmp(&done, Label::kNear);
2999
3000  // The inlined call site cache did not match. Check for null and string
3001  // before calling the deferred code.
3002  __ bind(&cache_miss);
3003  // Null is not an instance of anything.
3004  __ cmp(object, factory()->null_value());
3005  __ j(equal, &false_result, Label::kNear);
3006
3007  // String values are not instances of anything.
3008  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
3009  __ j(is_string, &false_result, Label::kNear);
3010
3011  // Go to the deferred code.
3012  __ jmp(deferred->entry());
3013
3014  __ bind(&false_result);
3015  __ mov(ToRegister(instr->result()), factory()->false_value());
3016
3017  // Here result has either true or false. Deferred code also produces true or
3018  // false object.
3019  __ bind(deferred->exit());
3020  __ bind(&done);
3021}
3022
3023
3024void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
3025                                               Label* map_check) {
3026  PushSafepointRegistersScope scope(this);
3027
3028  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3029  flags = static_cast<InstanceofStub::Flags>(
3030      flags | InstanceofStub::kArgsInRegisters);
3031  flags = static_cast<InstanceofStub::Flags>(
3032      flags | InstanceofStub::kCallSiteInlineCheck);
3033  flags = static_cast<InstanceofStub::Flags>(
3034      flags | InstanceofStub::kReturnTrueFalseObject);
3035  InstanceofStub stub(flags);
3036
3037  // Get the temp register reserved by the instruction. This needs to be a
3038  // register which is pushed last by PushSafepointRegisters as top of the
3039  // stack is used to pass the offset to the location of the map check to
3040  // the stub.
3041  Register temp = ToRegister(instr->temp());
3042  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
3043  __ LoadHeapObject(InstanceofStub::right(), instr->function());
3044  static const int kAdditionalDelta = 13;
3045  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
3046  __ mov(temp, Immediate(delta));
3047  __ StoreToSafepointRegisterSlot(temp, temp);
3048  CallCodeGeneric(stub.GetCode(isolate()),
3049                  RelocInfo::CODE_TARGET,
3050                  instr,
3051                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3052  // Get the deoptimization index of the LLazyBailout-environment that
3053  // corresponds to this instruction.
3054  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3055  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3056
3057  // Put the result value into the eax slot and restore all registers.
3058  __ StoreToSafepointRegisterSlot(eax, eax);
3059}
3060
3061
3062void LCodeGen::DoCmpT(LCmpT* instr) {
3063  Token::Value op = instr->op();
3064
3065  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
3066  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3067
3068  Condition condition = ComputeCompareCondition(op);
3069  Label true_value, done;
3070  __ test(eax, Operand(eax));
3071  __ j(condition, &true_value, Label::kNear);
3072  __ mov(ToRegister(instr->result()), factory()->false_value());
3073  __ jmp(&done, Label::kNear);
3074  __ bind(&true_value);
3075  __ mov(ToRegister(instr->result()), factory()->true_value());
3076  __ bind(&done);
3077}
3078
3079
3080void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
3081  int extra_value_count = dynamic_frame_alignment ? 2 : 1;
3082
3083  if (instr->has_constant_parameter_count()) {
3084    int parameter_count = ToInteger32(instr->constant_parameter_count());
3085    if (dynamic_frame_alignment && FLAG_debug_code) {
3086      __ cmp(Operand(esp,
3087                     (parameter_count + extra_value_count) * kPointerSize),
3088             Immediate(kAlignmentZapValue));
3089      __ Assert(equal, kExpectedAlignmentMarker);
3090    }
3091    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
3092  } else {
3093    Register reg = ToRegister(instr->parameter_count());
3094    // The argument count parameter is a smi
3095    __ SmiUntag(reg);
3096    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
3097    if (dynamic_frame_alignment && FLAG_debug_code) {
3098      ASSERT(extra_value_count == 2);
3099      __ cmp(Operand(esp, reg, times_pointer_size,
3100                     extra_value_count * kPointerSize),
3101             Immediate(kAlignmentZapValue));
3102      __ Assert(equal, kExpectedAlignmentMarker);
3103    }
3104
3105    // emit code to restore stack based on instr->parameter_count()
3106    __ pop(return_addr_reg);  // save return address
3107    if (dynamic_frame_alignment) {
3108      __ inc(reg);  // 1 more for alignment
3109    }
3110    __ shl(reg, kPointerSizeLog2);
3111    __ add(esp, reg);
3112    __ jmp(return_addr_reg);
3113  }
3114}
3115
3116
3117void LCodeGen::DoReturn(LReturn* instr) {
3118  if (FLAG_trace && info()->IsOptimizing()) {
3119    // Preserve the return value on the stack and rely on the runtime call
3120    // to return the value in the same register.  We're leaving the code
3121    // managed by the register allocator and tearing down the frame, it's
3122    // safe to write to the context register.
3123    __ push(eax);
3124    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
3125    __ CallRuntime(Runtime::kTraceExit, 1);
3126  }
3127  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
3128    RestoreCallerDoubles();
3129  }
3130  if (dynamic_frame_alignment_) {
3131    // Fetch the state of the dynamic frame alignment.
3132    __ mov(edx, Operand(ebp,
3133      JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
3134  }
3135  int no_frame_start = -1;
3136  if (NeedsEagerFrame()) {
3137    __ mov(esp, ebp);
3138    __ pop(ebp);
3139    no_frame_start = masm_->pc_offset();
3140  }
3141  if (dynamic_frame_alignment_) {
3142    Label no_padding;
3143    __ cmp(edx, Immediate(kNoAlignmentPadding));
3144    __ j(equal, &no_padding, Label::kNear);
3145
3146    EmitReturn(instr, true);
3147    __ bind(&no_padding);
3148  }
3149
3150  EmitReturn(instr, false);
3151  if (no_frame_start != -1) {
3152    info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
3153  }
3154}
3155
3156
3157void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3158  Register result = ToRegister(instr->result());
3159  __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
3160  if (instr->hydrogen()->RequiresHoleCheck()) {
3161    __ cmp(result, factory()->the_hole_value());
3162    DeoptimizeIf(equal, instr->environment());
3163  }
3164}
3165
3166
3167void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3168  ASSERT(ToRegister(instr->context()).is(esi));
3169  ASSERT(ToRegister(instr->global_object()).is(edx));
3170  ASSERT(ToRegister(instr->result()).is(eax));
3171
3172  __ mov(ecx, instr->name());
3173  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
3174                                               RelocInfo::CODE_TARGET_CONTEXT;
3175  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
3176  CallCode(ic, mode, instr);
3177}
3178
3179
3180void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
3181  Register value = ToRegister(instr->value());
3182  Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
3183
3184  // If the cell we are storing to contains the hole it could have
3185  // been deleted from the property dictionary. In that case, we need
3186  // to update the property details in the property dictionary to mark
3187  // it as no longer deleted. We deoptimize in that case.
3188  if (instr->hydrogen()->RequiresHoleCheck()) {
3189    __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
3190    DeoptimizeIf(equal, instr->environment());
3191  }
3192
3193  // Store the value.
3194  __ mov(Operand::ForCell(cell_handle), value);
3195  // Cells are always rescanned, so no write barrier here.
3196}
3197
3198
3199void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
3200  ASSERT(ToRegister(instr->context()).is(esi));
3201  ASSERT(ToRegister(instr->global_object()).is(edx));
3202  ASSERT(ToRegister(instr->value()).is(eax));
3203
3204  __ mov(ecx, instr->name());
3205  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3206      ? isolate()->builtins()->StoreIC_Initialize_Strict()
3207      : isolate()->builtins()->StoreIC_Initialize();
3208  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
3209}
3210
3211
3212void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3213  Register context = ToRegister(instr->context());
3214  Register result = ToRegister(instr->result());
3215  __ mov(result, ContextOperand(context, instr->slot_index()));
3216
3217  if (instr->hydrogen()->RequiresHoleCheck()) {
3218    __ cmp(result, factory()->the_hole_value());
3219    if (instr->hydrogen()->DeoptimizesOnHole()) {
3220      DeoptimizeIf(equal, instr->environment());
3221    } else {
3222      Label is_not_hole;
3223      __ j(not_equal, &is_not_hole, Label::kNear);
3224      __ mov(result, factory()->undefined_value());
3225      __ bind(&is_not_hole);
3226    }
3227  }
3228}
3229
3230
3231void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3232  Register context = ToRegister(instr->context());
3233  Register value = ToRegister(instr->value());
3234
3235  Label skip_assignment;
3236
3237  Operand target = ContextOperand(context, instr->slot_index());
3238  if (instr->hydrogen()->RequiresHoleCheck()) {
3239    __ cmp(target, factory()->the_hole_value());
3240    if (instr->hydrogen()->DeoptimizesOnHole()) {
3241      DeoptimizeIf(equal, instr->environment());
3242    } else {
3243      __ j(not_equal, &skip_assignment, Label::kNear);
3244    }
3245  }
3246
3247  __ mov(target, value);
3248  if (instr->hydrogen()->NeedsWriteBarrier()) {
3249    SmiCheck check_needed =
3250        instr->hydrogen()->value()->IsHeapObject()
3251            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3252    Register temp = ToRegister(instr->temp());
3253    int offset = Context::SlotOffset(instr->slot_index());
3254    __ RecordWriteContextSlot(context,
3255                              offset,
3256                              value,
3257                              temp,
3258                              GetSaveFPRegsMode(),
3259                              EMIT_REMEMBERED_SET,
3260                              check_needed);
3261  }
3262
3263  __ bind(&skip_assignment);
3264}
3265
3266
3267void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3268  HObjectAccess access = instr->hydrogen()->access();
3269  int offset = access.offset();
3270
3271  if (access.IsExternalMemory()) {
3272    Register result = ToRegister(instr->result());
3273    MemOperand operand = instr->object()->IsConstantOperand()
3274        ? MemOperand::StaticVariable(ToExternalReference(
3275                LConstantOperand::cast(instr->object())))
3276        : MemOperand(ToRegister(instr->object()), offset);
3277    __ Load(result, operand, access.representation());
3278    return;
3279  }
3280
3281  Register object = ToRegister(instr->object());
3282  if (FLAG_track_double_fields &&
3283      instr->hydrogen()->representation().IsDouble()) {
3284    if (CpuFeatures::IsSupported(SSE2)) {
3285      CpuFeatureScope scope(masm(), SSE2);
3286      XMMRegister result = ToDoubleRegister(instr->result());
3287      __ movsd(result, FieldOperand(object, offset));
3288    } else {
3289      X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
3290    }
3291    return;
3292  }
3293
3294  Register result = ToRegister(instr->result());
3295  if (!access.IsInobject()) {
3296    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
3297    object = result;
3298  }
3299  __ Load(result, FieldOperand(object, offset), access.representation());
3300}
3301
3302
3303void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
3304  ASSERT(!operand->IsDoubleRegister());
3305  if (operand->IsConstantOperand()) {
3306    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
3307    AllowDeferredHandleDereference smi_check;
3308    if (object->IsSmi()) {
3309      __ Push(Handle<Smi>::cast(object));
3310    } else {
3311      __ PushHeapObject(Handle<HeapObject>::cast(object));
3312    }
3313  } else if (operand->IsRegister()) {
3314    __ push(ToRegister(operand));
3315  } else {
3316    __ push(ToOperand(operand));
3317  }
3318}
3319
3320
3321void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3322  ASSERT(ToRegister(instr->context()).is(esi));
3323  ASSERT(ToRegister(instr->object()).is(edx));
3324  ASSERT(ToRegister(instr->result()).is(eax));
3325
3326  __ mov(ecx, instr->name());
3327  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
3328  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3329}
3330
3331
3332void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3333  Register function = ToRegister(instr->function());
3334  Register temp = ToRegister(instr->temp());
3335  Register result = ToRegister(instr->result());
3336
3337  // Check that the function really is a function.
3338  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
3339  DeoptimizeIf(not_equal, instr->environment());
3340
3341  // Check whether the function has an instance prototype.
3342  Label non_instance;
3343  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
3344            1 << Map::kHasNonInstancePrototype);
3345  __ j(not_zero, &non_instance, Label::kNear);
3346
3347  // Get the prototype or initial map from the function.
3348  __ mov(result,
3349         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3350
3351  // Check that the function has a prototype or an initial map.
3352  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
3353  DeoptimizeIf(equal, instr->environment());
3354
3355  // If the function does not have an initial map, we're done.
3356  Label done;
3357  __ CmpObjectType(result, MAP_TYPE, temp);
3358  __ j(not_equal, &done, Label::kNear);
3359
3360  // Get the prototype from the initial map.
3361  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
3362  __ jmp(&done, Label::kNear);
3363
3364  // Non-instance prototype: Fetch prototype from constructor field
3365  // in the function's map.
3366  __ bind(&non_instance);
3367  __ mov(result, FieldOperand(result, Map::kConstructorOffset));
3368
3369  // All done.
3370  __ bind(&done);
3371}
3372
3373
3374void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3375  Register result = ToRegister(instr->result());
3376  __ LoadRoot(result, instr->index());
3377}
3378
3379
3380void LCodeGen::DoLoadExternalArrayPointer(
3381    LLoadExternalArrayPointer* instr) {
3382  Register result = ToRegister(instr->result());
3383  Register input = ToRegister(instr->object());
3384  __ mov(result, FieldOperand(input,
3385                              ExternalArray::kExternalPointerOffset));
3386}
3387
3388
3389void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3390  Register arguments = ToRegister(instr->arguments());
3391  Register result = ToRegister(instr->result());
3392  if (instr->length()->IsConstantOperand() &&
3393      instr->index()->IsConstantOperand()) {
3394    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3395    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3396    int index = (const_length - const_index) + 1;
3397    __ mov(result, Operand(arguments, index * kPointerSize));
3398  } else {
3399    Register length = ToRegister(instr->length());
3400    Operand index = ToOperand(instr->index());
3401    // There are two words between the frame pointer and the last argument.
3402    // Subtracting from length accounts for one of them add one more.
3403    __ sub(length, index);
3404    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
3405  }
3406}
3407
3408
3409void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3410  ElementsKind elements_kind = instr->elements_kind();
3411  LOperand* key = instr->key();
3412  if (!key->IsConstantOperand() &&
3413      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3414                                  elements_kind)) {
3415    __ SmiUntag(ToRegister(key));
3416  }
3417  Operand operand(BuildFastArrayOperand(
3418      instr->elements(),
3419      key,
3420      instr->hydrogen()->key()->representation(),
3421      elements_kind,
3422      0,
3423      instr->additional_index()));
3424  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3425    if (CpuFeatures::IsSupported(SSE2)) {
3426      CpuFeatureScope scope(masm(), SSE2);
3427      XMMRegister result(ToDoubleRegister(instr->result()));
3428      __ movss(result, operand);
3429      __ cvtss2sd(result, result);
3430    } else {
3431      X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
3432    }
3433  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3434    if (CpuFeatures::IsSupported(SSE2)) {
3435      CpuFeatureScope scope(masm(), SSE2);
3436      __ movsd(ToDoubleRegister(instr->result()), operand);
3437    } else {
3438      X87Mov(ToX87Register(instr->result()), operand);
3439    }
3440  } else {
3441    Register result(ToRegister(instr->result()));
3442    switch (elements_kind) {
3443      case EXTERNAL_BYTE_ELEMENTS:
3444        __ movsx_b(result, operand);
3445        break;
3446      case EXTERNAL_PIXEL_ELEMENTS:
3447      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3448        __ movzx_b(result, operand);
3449        break;
3450      case EXTERNAL_SHORT_ELEMENTS:
3451        __ movsx_w(result, operand);
3452        break;
3453      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3454        __ movzx_w(result, operand);
3455        break;
3456      case EXTERNAL_INT_ELEMENTS:
3457        __ mov(result, operand);
3458        break;
3459      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3460        __ mov(result, operand);
3461        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3462          __ test(result, Operand(result));
3463          DeoptimizeIf(negative, instr->environment());
3464        }
3465        break;
3466      case EXTERNAL_FLOAT_ELEMENTS:
3467      case EXTERNAL_DOUBLE_ELEMENTS:
3468      case FAST_SMI_ELEMENTS:
3469      case FAST_ELEMENTS:
3470      case FAST_DOUBLE_ELEMENTS:
3471      case FAST_HOLEY_SMI_ELEMENTS:
3472      case FAST_HOLEY_ELEMENTS:
3473      case FAST_HOLEY_DOUBLE_ELEMENTS:
3474      case DICTIONARY_ELEMENTS:
3475      case NON_STRICT_ARGUMENTS_ELEMENTS:
3476        UNREACHABLE();
3477        break;
3478    }
3479  }
3480}
3481
3482
3483void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3484  if (instr->hydrogen()->RequiresHoleCheck()) {
3485    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3486        sizeof(kHoleNanLower32);
3487    Operand hole_check_operand = BuildFastArrayOperand(
3488        instr->elements(), instr->key(),
3489        instr->hydrogen()->key()->representation(),
3490        FAST_DOUBLE_ELEMENTS,
3491        offset,
3492        instr->additional_index());
3493    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
3494    DeoptimizeIf(equal, instr->environment());
3495  }
3496
3497  Operand double_load_operand = BuildFastArrayOperand(
3498      instr->elements(),
3499      instr->key(),
3500      instr->hydrogen()->key()->representation(),
3501      FAST_DOUBLE_ELEMENTS,
3502      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3503      instr->additional_index());
3504  if (CpuFeatures::IsSupported(SSE2)) {
3505    CpuFeatureScope scope(masm(), SSE2);
3506    XMMRegister result = ToDoubleRegister(instr->result());
3507    __ movsd(result, double_load_operand);
3508  } else {
3509    X87Mov(ToX87Register(instr->result()), double_load_operand);
3510  }
3511}
3512
3513
3514void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3515  Register result = ToRegister(instr->result());
3516
3517  // Load the result.
3518  __ mov(result,
3519         BuildFastArrayOperand(instr->elements(),
3520                               instr->key(),
3521                               instr->hydrogen()->key()->representation(),
3522                               FAST_ELEMENTS,
3523                               FixedArray::kHeaderSize - kHeapObjectTag,
3524                               instr->additional_index()));
3525
3526  // Check for the hole value.
3527  if (instr->hydrogen()->RequiresHoleCheck()) {
3528    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3529      __ test(result, Immediate(kSmiTagMask));
3530      DeoptimizeIf(not_equal, instr->environment());
3531    } else {
3532      __ cmp(result, factory()->the_hole_value());
3533      DeoptimizeIf(equal, instr->environment());
3534    }
3535  }
3536}
3537
3538
3539void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3540  if (instr->is_external()) {
3541    DoLoadKeyedExternalArray(instr);
3542  } else if (instr->hydrogen()->representation().IsDouble()) {
3543    DoLoadKeyedFixedDoubleArray(instr);
3544  } else {
3545    DoLoadKeyedFixedArray(instr);
3546  }
3547}
3548
3549
3550Operand LCodeGen::BuildFastArrayOperand(
3551    LOperand* elements_pointer,
3552    LOperand* key,
3553    Representation key_representation,
3554    ElementsKind elements_kind,
3555    uint32_t offset,
3556    uint32_t additional_index) {
3557  Register elements_pointer_reg = ToRegister(elements_pointer);
3558  int element_shift_size = ElementsKindToShiftSize(elements_kind);
3559  int shift_size = element_shift_size;
3560  if (key->IsConstantOperand()) {
3561    int constant_value = ToInteger32(LConstantOperand::cast(key));
3562    if (constant_value & 0xF0000000) {
3563      Abort(kArrayIndexConstantValueTooBig);
3564    }
3565    return Operand(elements_pointer_reg,
3566                   ((constant_value + additional_index) << shift_size)
3567                       + offset);
3568  } else {
3569    // Take the tag bit into account while computing the shift size.
3570    if (key_representation.IsSmi() && (shift_size >= 1)) {
3571      shift_size -= kSmiTagSize;
3572    }
3573    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
3574    return Operand(elements_pointer_reg,
3575                   ToRegister(key),
3576                   scale_factor,
3577                   offset + (additional_index << element_shift_size));
3578  }
3579}
3580
3581
3582void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3583  ASSERT(ToRegister(instr->context()).is(esi));
3584  ASSERT(ToRegister(instr->object()).is(edx));
3585  ASSERT(ToRegister(instr->key()).is(ecx));
3586
3587  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3588  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3589}
3590
3591
3592void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3593  Register result = ToRegister(instr->result());
3594
3595  if (instr->hydrogen()->from_inlined()) {
3596    __ lea(result, Operand(esp, -2 * kPointerSize));
3597  } else {
3598    // Check for arguments adapter frame.
3599    Label done, adapted;
3600    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3601    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
3602    __ cmp(Operand(result),
3603           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3604    __ j(equal, &adapted, Label::kNear);
3605
3606    // No arguments adaptor frame.
3607    __ mov(result, Operand(ebp));
3608    __ jmp(&done, Label::kNear);
3609
3610    // Arguments adaptor frame present.
3611    __ bind(&adapted);
3612    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3613
3614    // Result is the frame pointer for the frame if not adapted and for the real
3615    // frame below the adaptor frame if adapted.
3616    __ bind(&done);
3617  }
3618}
3619
3620
3621void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3622  Operand elem = ToOperand(instr->elements());
3623  Register result = ToRegister(instr->result());
3624
3625  Label done;
3626
3627  // If no arguments adaptor frame the number of arguments is fixed.
3628  __ cmp(ebp, elem);
3629  __ mov(result, Immediate(scope()->num_parameters()));
3630  __ j(equal, &done, Label::kNear);
3631
3632  // Arguments adaptor frame present. Get argument length from there.
3633  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3634  __ mov(result, Operand(result,
3635                         ArgumentsAdaptorFrameConstants::kLengthOffset));
3636  __ SmiUntag(result);
3637
3638  // Argument length is in result register.
3639  __ bind(&done);
3640}
3641
3642
3643void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3644  Register receiver = ToRegister(instr->receiver());
3645  Register function = ToRegister(instr->function());
3646  Register scratch = ToRegister(instr->temp());
3647
3648  // If the receiver is null or undefined, we have to pass the global
3649  // object as a receiver to normal functions. Values have to be
3650  // passed unchanged to builtins and strict-mode functions.
3651  Label global_object, receiver_ok;
3652  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3653
3654  // Do not transform the receiver to object for strict mode
3655  // functions.
3656  __ mov(scratch,
3657         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3658  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
3659            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
3660  __ j(not_equal, &receiver_ok, dist);
3661
3662  // Do not transform the receiver to object for builtins.
3663  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
3664            1 << SharedFunctionInfo::kNativeBitWithinByte);
3665  __ j(not_equal, &receiver_ok, dist);
3666
3667  // Normal function. Replace undefined or null with global receiver.
3668  __ cmp(receiver, factory()->null_value());
3669  __ j(equal, &global_object, Label::kNear);
3670  __ cmp(receiver, factory()->undefined_value());
3671  __ j(equal, &global_object, Label::kNear);
3672
3673  // The receiver should be a JS object.
3674  __ test(receiver, Immediate(kSmiTagMask));
3675  DeoptimizeIf(equal, instr->environment());
3676  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
3677  DeoptimizeIf(below, instr->environment());
3678  __ jmp(&receiver_ok, Label::kNear);
3679
3680  __ bind(&global_object);
3681  // TODO(kmillikin): We have a hydrogen value for the global object.  See
3682  // if it's better to use it than to explicitly fetch it from the context
3683  // here.
3684  __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
3685  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
3686  __ mov(receiver,
3687         FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
3688  __ bind(&receiver_ok);
3689}
3690
3691
3692void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3693  Register receiver = ToRegister(instr->receiver());
3694  Register function = ToRegister(instr->function());
3695  Register length = ToRegister(instr->length());
3696  Register elements = ToRegister(instr->elements());
3697  ASSERT(receiver.is(eax));  // Used for parameter count.
3698  ASSERT(function.is(edi));  // Required by InvokeFunction.
3699  ASSERT(ToRegister(instr->result()).is(eax));
3700
3701  // Copy the arguments to this function possibly from the
3702  // adaptor frame below it.
3703  const uint32_t kArgumentsLimit = 1 * KB;
3704  __ cmp(length, kArgumentsLimit);
3705  DeoptimizeIf(above, instr->environment());
3706
3707  __ push(receiver);
3708  __ mov(receiver, length);
3709
3710  // Loop through the arguments pushing them onto the execution
3711  // stack.
3712  Label invoke, loop;
3713  // length is a small non-negative integer, due to the test above.
3714  __ test(length, Operand(length));
3715  __ j(zero, &invoke, Label::kNear);
3716  __ bind(&loop);
3717  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
3718  __ dec(length);
3719  __ j(not_zero, &loop);
3720
3721  // Invoke the function.
3722  __ bind(&invoke);
3723  ASSERT(instr->HasPointerMap());
3724  LPointerMap* pointers = instr->pointer_map();
3725  SafepointGenerator safepoint_generator(
3726      this, pointers, Safepoint::kLazyDeopt);
3727  ParameterCount actual(eax);
3728  __ InvokeFunction(function, actual, CALL_FUNCTION,
3729                    safepoint_generator, CALL_AS_METHOD);
3730}
3731
3732
3733void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
3734  __ int3();
3735}
3736
3737
3738void LCodeGen::DoPushArgument(LPushArgument* instr) {
3739  LOperand* argument = instr->value();
3740  EmitPushTaggedOperand(argument);
3741}
3742
3743
3744void LCodeGen::DoDrop(LDrop* instr) {
3745  __ Drop(instr->count());
3746}
3747
3748
3749void LCodeGen::DoThisFunction(LThisFunction* instr) {
3750  Register result = ToRegister(instr->result());
3751  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3752}
3753
3754
3755void LCodeGen::DoContext(LContext* instr) {
3756  Register result = ToRegister(instr->result());
3757  if (info()->IsOptimizing()) {
3758    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
3759  } else {
3760    // If there is no frame, the context must be in esi.
3761    ASSERT(result.is(esi));
3762  }
3763}
3764
3765
3766void LCodeGen::DoOuterContext(LOuterContext* instr) {
3767  Register context = ToRegister(instr->context());
3768  Register result = ToRegister(instr->result());
3769  __ mov(result,
3770         Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3771}
3772
3773
3774void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3775  ASSERT(ToRegister(instr->context()).is(esi));
3776  __ push(esi);  // The context is the first argument.
3777  __ push(Immediate(instr->hydrogen()->pairs()));
3778  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
3779  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3780}
3781
3782
3783void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
3784  Register context = ToRegister(instr->context());
3785  Register result = ToRegister(instr->result());
3786  __ mov(result,
3787         Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3788}
3789
3790
3791void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
3792  Register global = ToRegister(instr->global());
3793  Register result = ToRegister(instr->result());
3794  __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
3795}
3796
3797
3798void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3799                                 int formal_parameter_count,
3800                                 int arity,
3801                                 LInstruction* instr,
3802                                 CallKind call_kind,
3803                                 EDIState edi_state) {
3804  bool dont_adapt_arguments =
3805      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3806  bool can_invoke_directly =
3807      dont_adapt_arguments || formal_parameter_count == arity;
3808
3809  if (can_invoke_directly) {
3810    if (edi_state == EDI_UNINITIALIZED) {
3811      __ LoadHeapObject(edi, function);
3812    }
3813
3814    // Change context.
3815    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
3816
3817    // Set eax to arguments count if adaption is not needed. Assumes that eax
3818    // is available to write to at this point.
3819    if (dont_adapt_arguments) {
3820      __ mov(eax, arity);
3821    }
3822
3823    // Invoke function directly.
3824    __ SetCallKind(ecx, call_kind);
3825    if (function.is_identical_to(info()->closure())) {
3826      __ CallSelf();
3827    } else {
3828      __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
3829    }
3830    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3831  } else {
3832    // We need to adapt arguments.
3833    LPointerMap* pointers = instr->pointer_map();
3834    SafepointGenerator generator(
3835        this, pointers, Safepoint::kLazyDeopt);
3836    ParameterCount count(arity);
3837    ParameterCount expected(formal_parameter_count);
3838    __ InvokeFunction(
3839        function, expected, count, CALL_FUNCTION, generator, call_kind);
3840  }
3841}
3842
3843
3844void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
3845  ASSERT(ToRegister(instr->result()).is(eax));
3846  CallKnownFunction(instr->hydrogen()->function(),
3847                    instr->hydrogen()->formal_parameter_count(),
3848                    instr->arity(),
3849                    instr,
3850                    CALL_AS_METHOD,
3851                    EDI_UNINITIALIZED);
3852}
3853
3854
3855void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3856  Register input_reg = ToRegister(instr->value());
3857  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3858         factory()->heap_number_map());
3859  DeoptimizeIf(not_equal, instr->environment());
3860
3861  Label slow, allocated, done;
3862  Register tmp = input_reg.is(eax) ? ecx : eax;
3863  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
3864
3865  // Preserve the value of all registers.
3866  PushSafepointRegistersScope scope(this);
3867
3868  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3869  // Check the sign of the argument. If the argument is positive, just
3870  // return it. We do not need to patch the stack since |input| and
3871  // |result| are the same register and |input| will be restored
3872  // unchanged by popping safepoint registers.
3873  __ test(tmp, Immediate(HeapNumber::kSignMask));
3874  __ j(zero, &done, Label::kNear);
3875
3876  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
3877  __ jmp(&allocated, Label::kNear);
3878
3879  // Slow case: Call the runtime system to do the number allocation.
3880  __ bind(&slow);
3881  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
3882                          instr, instr->context());
3883  // Set the pointer to the new heap number in tmp.
3884  if (!tmp.is(eax)) __ mov(tmp, eax);
3885  // Restore input_reg after call to runtime.
3886  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3887
3888  __ bind(&allocated);
3889  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3890  __ and_(tmp2, ~HeapNumber::kSignMask);
3891  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
3892  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
3893  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
3894  __ StoreToSafepointRegisterSlot(input_reg, tmp);
3895
3896  __ bind(&done);
3897}
3898
3899
3900void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3901  Register input_reg = ToRegister(instr->value());
3902  __ test(input_reg, Operand(input_reg));
3903  Label is_positive;
3904  __ j(not_sign, &is_positive, Label::kNear);
3905  __ neg(input_reg);  // Sets flags.
3906  DeoptimizeIf(negative, instr->environment());
3907  __ bind(&is_positive);
3908}
3909
3910
3911void LCodeGen::DoMathAbs(LMathAbs* instr) {
3912  // Class for deferred case.
3913  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3914   public:
3915    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3916                                    LMathAbs* instr,
3917                                    const X87Stack& x87_stack)
3918        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
3919    virtual void Generate() V8_OVERRIDE {
3920      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3921    }
3922    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3923   private:
3924    LMathAbs* instr_;
3925  };
3926
3927  ASSERT(instr->value()->Equals(instr->result()));
3928  Representation r = instr->hydrogen()->value()->representation();
3929
3930  CpuFeatureScope scope(masm(), SSE2);
3931  if (r.IsDouble()) {
3932    XMMRegister scratch = double_scratch0();
3933    XMMRegister input_reg = ToDoubleRegister(instr->value());
3934    __ xorps(scratch, scratch);
3935    __ subsd(scratch, input_reg);
3936    __ andps(input_reg, scratch);
3937  } else if (r.IsSmiOrInteger32()) {
3938    EmitIntegerMathAbs(instr);
3939  } else {  // Tagged case.
3940    DeferredMathAbsTaggedHeapNumber* deferred =
3941        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
3942    Register input_reg = ToRegister(instr->value());
3943    // Smi check.
3944    __ JumpIfNotSmi(input_reg, deferred->entry());
3945    EmitIntegerMathAbs(instr);
3946    __ bind(deferred->exit());
3947  }
3948}
3949
3950
3951void LCodeGen::DoMathFloor(LMathFloor* instr) {
3952  CpuFeatureScope scope(masm(), SSE2);
3953  XMMRegister xmm_scratch = double_scratch0();
3954  Register output_reg = ToRegister(instr->result());
3955  XMMRegister input_reg = ToDoubleRegister(instr->value());
3956
3957  if (CpuFeatures::IsSupported(SSE4_1)) {
3958    CpuFeatureScope scope(masm(), SSE4_1);
3959    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3960      // Deoptimize on negative zero.
3961      Label non_zero;
3962      __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
3963      __ ucomisd(input_reg, xmm_scratch);
3964      __ j(not_equal, &non_zero, Label::kNear);
3965      __ movmskpd(output_reg, input_reg);
3966      __ test(output_reg, Immediate(1));
3967      DeoptimizeIf(not_zero, instr->environment());
3968      __ bind(&non_zero);
3969    }
3970    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
3971    __ cvttsd2si(output_reg, Operand(xmm_scratch));
3972    // Overflow is signalled with minint.
3973    __ cmp(output_reg, 0x80000000u);
3974    DeoptimizeIf(equal, instr->environment());
3975  } else {
3976    Label negative_sign, done;
3977    // Deoptimize on unordered.
3978    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
3979    __ ucomisd(input_reg, xmm_scratch);
3980    DeoptimizeIf(parity_even, instr->environment());
3981    __ j(below, &negative_sign, Label::kNear);
3982
3983    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3984      // Check for negative zero.
3985      Label positive_sign;
3986      __ j(above, &positive_sign, Label::kNear);
3987      __ movmskpd(output_reg, input_reg);
3988      __ test(output_reg, Immediate(1));
3989      DeoptimizeIf(not_zero, instr->environment());
3990      __ Set(output_reg, Immediate(0));
3991      __ jmp(&done, Label::kNear);
3992      __ bind(&positive_sign);
3993    }
3994
3995    // Use truncating instruction (OK because input is positive).
3996    __ cvttsd2si(output_reg, Operand(input_reg));
3997    // Overflow is signalled with minint.
3998    __ cmp(output_reg, 0x80000000u);
3999    DeoptimizeIf(equal, instr->environment());
4000    __ jmp(&done, Label::kNear);
4001
4002    // Non-zero negative reaches here.
4003    __ bind(&negative_sign);
4004    // Truncate, then compare and compensate.
4005    __ cvttsd2si(output_reg, Operand(input_reg));
4006    __ Cvtsi2sd(xmm_scratch, output_reg);
4007    __ ucomisd(input_reg, xmm_scratch);
4008    __ j(equal, &done, Label::kNear);
4009    __ sub(output_reg, Immediate(1));
4010    DeoptimizeIf(overflow, instr->environment());
4011
4012    __ bind(&done);
4013  }
4014}
4015
4016
4017void LCodeGen::DoMathRound(LMathRound* instr) {
4018  CpuFeatureScope scope(masm(), SSE2);
4019  Register output_reg = ToRegister(instr->result());
4020  XMMRegister input_reg = ToDoubleRegister(instr->value());
4021  XMMRegister xmm_scratch = double_scratch0();
4022  XMMRegister input_temp = ToDoubleRegister(instr->temp());
4023  ExternalReference one_half = ExternalReference::address_of_one_half();
4024  ExternalReference minus_one_half =
4025      ExternalReference::address_of_minus_one_half();
4026
4027  Label done, round_to_zero, below_one_half, do_not_compensate;
4028  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4029
4030  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
4031  __ ucomisd(xmm_scratch, input_reg);
4032  __ j(above, &below_one_half, Label::kNear);
4033
4034  // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
4035  __ addsd(xmm_scratch, input_reg);
4036  __ cvttsd2si(output_reg, Operand(xmm_scratch));
4037  // Overflow is signalled with minint.
4038  __ cmp(output_reg, 0x80000000u);
4039  __ RecordComment("D2I conversion overflow");
4040  DeoptimizeIf(equal, instr->environment());
4041  __ jmp(&done, dist);
4042
4043  __ bind(&below_one_half);
4044  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
4045  __ ucomisd(xmm_scratch, input_reg);
4046  __ j(below_equal, &round_to_zero, Label::kNear);
4047
4048  // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
4049  // compare and compensate.
4050  __ movaps(input_temp, input_reg);  // Do not alter input_reg.
4051  __ subsd(input_temp, xmm_scratch);
4052  __ cvttsd2si(output_reg, Operand(input_temp));
4053  // Catch minint due to overflow, and to prevent overflow when compensating.
4054  __ cmp(output_reg, 0x80000000u);
4055  __ RecordComment("D2I conversion overflow");
4056  DeoptimizeIf(equal, instr->environment());
4057
4058  __ Cvtsi2sd(xmm_scratch, output_reg);
4059  __ ucomisd(xmm_scratch, input_temp);
4060  __ j(equal, &done, dist);
4061  __ sub(output_reg, Immediate(1));
4062  // No overflow because we already ruled out minint.
4063  __ jmp(&done, dist);
4064
4065  __ bind(&round_to_zero);
4066  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
4067  // we can ignore the difference between a result of -0 and +0.
4068  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4069    // If the sign is positive, we return +0.
4070    __ movmskpd(output_reg, input_reg);
4071    __ test(output_reg, Immediate(1));
4072    __ RecordComment("Minus zero");
4073    DeoptimizeIf(not_zero, instr->environment());
4074  }
4075  __ Set(output_reg, Immediate(0));
4076  __ bind(&done);
4077}
4078
4079
4080void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4081  CpuFeatureScope scope(masm(), SSE2);
4082  XMMRegister input_reg = ToDoubleRegister(instr->value());
4083  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
4084  __ sqrtsd(input_reg, input_reg);
4085}
4086
4087
4088void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4089  CpuFeatureScope scope(masm(), SSE2);
4090  XMMRegister xmm_scratch = double_scratch0();
4091  XMMRegister input_reg = ToDoubleRegister(instr->value());
4092  Register scratch = ToRegister(instr->temp());
4093  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
4094
4095  // Note that according to ECMA-262 15.8.2.13:
4096  // Math.pow(-Infinity, 0.5) == Infinity
4097  // Math.sqrt(-Infinity) == NaN
4098  Label done, sqrt;
4099  // Check base for -Infinity.  According to IEEE-754, single-precision
4100  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
4101  __ mov(scratch, 0xFF800000);
4102  __ movd(xmm_scratch, scratch);
4103  __ cvtss2sd(xmm_scratch, xmm_scratch);
4104  __ ucomisd(input_reg, xmm_scratch);
4105  // Comparing -Infinity with NaN results in "unordered", which sets the
4106  // zero flag as if both were equal.  However, it also sets the carry flag.
4107  __ j(not_equal, &sqrt, Label::kNear);
4108  __ j(carry, &sqrt, Label::kNear);
4109  // If input is -Infinity, return Infinity.
4110  __ xorps(input_reg, input_reg);
4111  __ subsd(input_reg, xmm_scratch);
4112  __ jmp(&done, Label::kNear);
4113
4114  // Square root.
4115  __ bind(&sqrt);
4116  __ xorps(xmm_scratch, xmm_scratch);
4117  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
4118  __ sqrtsd(input_reg, input_reg);
4119  __ bind(&done);
4120}
4121
4122
4123void LCodeGen::DoPower(LPower* instr) {
4124  Representation exponent_type = instr->hydrogen()->right()->representation();
4125  // Having marked this as a call, we can use any registers.
4126  // Just make sure that the input/output registers are the expected ones.
4127  ASSERT(!instr->right()->IsDoubleRegister() ||
4128         ToDoubleRegister(instr->right()).is(xmm1));
4129  ASSERT(!instr->right()->IsRegister() ||
4130         ToRegister(instr->right()).is(eax));
4131  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
4132  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
4133
4134  if (exponent_type.IsSmi()) {
4135    MathPowStub stub(MathPowStub::TAGGED);
4136    __ CallStub(&stub);
4137  } else if (exponent_type.IsTagged()) {
4138    Label no_deopt;
4139    __ JumpIfSmi(eax, &no_deopt);
4140    __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
4141    DeoptimizeIf(not_equal, instr->environment());
4142    __ bind(&no_deopt);
4143    MathPowStub stub(MathPowStub::TAGGED);
4144    __ CallStub(&stub);
4145  } else if (exponent_type.IsInteger32()) {
4146    MathPowStub stub(MathPowStub::INTEGER);
4147    __ CallStub(&stub);
4148  } else {
4149    ASSERT(exponent_type.IsDouble());
4150    MathPowStub stub(MathPowStub::DOUBLE);
4151    __ CallStub(&stub);
4152  }
4153}
4154
4155
4156void LCodeGen::DoMathLog(LMathLog* instr) {
4157  CpuFeatureScope scope(masm(), SSE2);
4158  ASSERT(instr->value()->Equals(instr->result()));
4159  XMMRegister input_reg = ToDoubleRegister(instr->value());
4160  XMMRegister xmm_scratch = double_scratch0();
4161  Label positive, done, zero;
4162  __ xorps(xmm_scratch, xmm_scratch);
4163  __ ucomisd(input_reg, xmm_scratch);
4164  __ j(above, &positive, Label::kNear);
4165  __ j(equal, &zero, Label::kNear);
4166  ExternalReference nan =
4167      ExternalReference::address_of_canonical_non_hole_nan();
4168  __ movsd(input_reg, Operand::StaticVariable(nan));
4169  __ jmp(&done, Label::kNear);
4170  __ bind(&zero);
4171  ExternalReference ninf =
4172      ExternalReference::address_of_negative_infinity();
4173  __ movsd(input_reg, Operand::StaticVariable(ninf));
4174  __ jmp(&done, Label::kNear);
4175  __ bind(&positive);
4176  __ fldln2();
4177  __ sub(Operand(esp), Immediate(kDoubleSize));
4178  __ movsd(Operand(esp, 0), input_reg);
4179  __ fld_d(Operand(esp, 0));
4180  __ fyl2x();
4181  __ fstp_d(Operand(esp, 0));
4182  __ movsd(input_reg, Operand(esp, 0));
4183  __ add(Operand(esp), Immediate(kDoubleSize));
4184  __ bind(&done);
4185}
4186
4187
4188void LCodeGen::DoMathExp(LMathExp* instr) {
4189  CpuFeatureScope scope(masm(), SSE2);
4190  XMMRegister input = ToDoubleRegister(instr->value());
4191  XMMRegister result = ToDoubleRegister(instr->result());
4192  XMMRegister temp0 = double_scratch0();
4193  Register temp1 = ToRegister(instr->temp1());
4194  Register temp2 = ToRegister(instr->temp2());
4195
4196  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
4197}
4198
4199
4200void LCodeGen::DoMathTan(LMathTan* instr) {
4201  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
4202  // Set the context register to a GC-safe fake value. Clobbering it is
4203  // OK because this instruction is marked as a call.
4204  __ Set(esi, Immediate(0));
4205  TranscendentalCacheStub stub(TranscendentalCache::TAN,
4206                               TranscendentalCacheStub::UNTAGGED);
4207  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4208}
4209
4210
4211void LCodeGen::DoMathCos(LMathCos* instr) {
4212  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
4213  // Set the context register to a GC-safe fake value. Clobbering it is
4214  // OK because this instruction is marked as a call.
4215  __ Set(esi, Immediate(0));
4216  TranscendentalCacheStub stub(TranscendentalCache::COS,
4217                               TranscendentalCacheStub::UNTAGGED);
4218  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4219}
4220
4221
4222void LCodeGen::DoMathSin(LMathSin* instr) {
4223  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
4224  // Set the context register to a GC-safe fake value. Clobbering it is
4225  // OK because this instruction is marked as a call.
4226  __ Set(esi, Immediate(0));
4227  TranscendentalCacheStub stub(TranscendentalCache::SIN,
4228                               TranscendentalCacheStub::UNTAGGED);
4229  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4230}
4231
4232
4233void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
4234  ASSERT(ToRegister(instr->context()).is(esi));
4235  ASSERT(ToRegister(instr->function()).is(edi));
4236  ASSERT(instr->HasPointerMap());
4237
4238  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
4239  if (known_function.is_null()) {
4240    LPointerMap* pointers = instr->pointer_map();
4241    SafepointGenerator generator(
4242        this, pointers, Safepoint::kLazyDeopt);
4243    ParameterCount count(instr->arity());
4244    __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
4245  } else {
4246    CallKnownFunction(known_function,
4247                      instr->hydrogen()->formal_parameter_count(),
4248                      instr->arity(),
4249                      instr,
4250                      CALL_AS_METHOD,
4251                      EDI_CONTAINS_TARGET);
4252  }
4253}
4254
4255
4256void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
4257  ASSERT(ToRegister(instr->context()).is(esi));
4258  ASSERT(ToRegister(instr->key()).is(ecx));
4259  ASSERT(ToRegister(instr->result()).is(eax));
4260
4261  int arity = instr->arity();
4262  Handle<Code> ic =
4263      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
4264  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4265}
4266
4267
4268void LCodeGen::DoCallNamed(LCallNamed* instr) {
4269  ASSERT(ToRegister(instr->context()).is(esi));
4270  ASSERT(ToRegister(instr->result()).is(eax));
4271
4272  int arity = instr->arity();
4273  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
4274  Handle<Code> ic =
4275      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
4276  __ mov(ecx, instr->name());
4277  CallCode(ic, mode, instr);
4278}
4279
4280
4281void LCodeGen::DoCallFunction(LCallFunction* instr) {
4282  ASSERT(ToRegister(instr->context()).is(esi));
4283  ASSERT(ToRegister(instr->function()).is(edi));
4284  ASSERT(ToRegister(instr->result()).is(eax));
4285
4286  int arity = instr->arity();
4287  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
4288  if (instr->hydrogen()->IsTailCall()) {
4289    if (NeedsEagerFrame()) __ leave();
4290    __ jmp(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
4291  } else {
4292    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4293  }
4294}
4295
4296
4297void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
4298  ASSERT(ToRegister(instr->context()).is(esi));
4299  ASSERT(ToRegister(instr->result()).is(eax));
4300
4301  int arity = instr->arity();
4302  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
4303  Handle<Code> ic =
4304      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
4305  __ mov(ecx, instr->name());
4306  CallCode(ic, mode, instr);
4307}
4308
4309
4310void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
4311  ASSERT(ToRegister(instr->result()).is(eax));
4312  CallKnownFunction(instr->hydrogen()->target(),
4313                    instr->hydrogen()->formal_parameter_count(),
4314                    instr->arity(),
4315                    instr,
4316                    CALL_AS_FUNCTION,
4317                    EDI_UNINITIALIZED);
4318}
4319
4320
4321void LCodeGen::DoCallNew(LCallNew* instr) {
4322  ASSERT(ToRegister(instr->context()).is(esi));
4323  ASSERT(ToRegister(instr->constructor()).is(edi));
4324  ASSERT(ToRegister(instr->result()).is(eax));
4325
4326  // No cell in ebx for construct type feedback in optimized code
4327  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
4328  __ mov(ebx, Immediate(undefined_value));
4329  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
4330  __ Set(eax, Immediate(instr->arity()));
4331  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4332}
4333
4334
4335void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4336  ASSERT(ToRegister(instr->context()).is(esi));
4337  ASSERT(ToRegister(instr->constructor()).is(edi));
4338  ASSERT(ToRegister(instr->result()).is(eax));
4339
4340  __ Set(eax, Immediate(instr->arity()));
4341  __ mov(ebx, instr->hydrogen()->property_cell());
4342  ElementsKind kind = instr->hydrogen()->elements_kind();
4343  AllocationSiteOverrideMode override_mode =
4344      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4345          ? DISABLE_ALLOCATION_SITES
4346          : DONT_OVERRIDE;
4347  ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
4348
4349  if (instr->arity() == 0) {
4350    ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
4351    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4352  } else if (instr->arity() == 1) {
4353    Label done;
4354    if (IsFastPackedElementsKind(kind)) {
4355      Label packed_case;
4356      // We might need a change here
4357      // look at the first argument
4358      __ mov(ecx, Operand(esp, 0));
4359      __ test(ecx, ecx);
4360      __ j(zero, &packed_case, Label::kNear);
4361
4362      ElementsKind holey_kind = GetHoleyElementsKind(kind);
4363      ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
4364                                              override_mode);
4365      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4366      __ jmp(&done, Label::kNear);
4367      __ bind(&packed_case);
4368    }
4369
4370    ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
4371    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4372    __ bind(&done);
4373  } else {
4374    ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
4375    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4376  }
4377}
4378
4379
4380void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4381  ASSERT(ToRegister(instr->context()).is(esi));
4382  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4383}
4384
4385
4386void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4387  Register function = ToRegister(instr->function());
4388  Register code_object = ToRegister(instr->code_object());
4389  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
4390  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
4391}
4392
4393
4394void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4395  Register result = ToRegister(instr->result());
4396  Register base = ToRegister(instr->base_object());
4397  if (instr->offset()->IsConstantOperand()) {
4398    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4399    __ lea(result, Operand(base, ToInteger32(offset)));
4400  } else {
4401    Register offset = ToRegister(instr->offset());
4402    __ lea(result, Operand(base, offset, times_1, 0));
4403  }
4404}
4405
4406
4407void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4408  Representation representation = instr->representation();
4409
4410  HObjectAccess access = instr->hydrogen()->access();
4411  int offset = access.offset();
4412
4413  if (access.IsExternalMemory()) {
4414    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4415    MemOperand operand = instr->object()->IsConstantOperand()
4416        ? MemOperand::StaticVariable(
4417            ToExternalReference(LConstantOperand::cast(instr->object())))
4418        : MemOperand(ToRegister(instr->object()), offset);
4419    if (instr->value()->IsConstantOperand()) {
4420      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4421      __ mov(operand, Immediate(ToInteger32(operand_value)));
4422    } else {
4423      Register value = ToRegister(instr->value());
4424      __ Store(value, operand, representation);
4425    }
4426    return;
4427  }
4428
4429  Register object = ToRegister(instr->object());
4430  Handle<Map> transition = instr->transition();
4431
4432  if (FLAG_track_fields && representation.IsSmi()) {
4433    if (instr->value()->IsConstantOperand()) {
4434      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4435      if (!IsSmi(operand_value)) {
4436        DeoptimizeIf(no_condition, instr->environment());
4437      }
4438    }
4439  } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
4440    if (instr->value()->IsConstantOperand()) {
4441      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4442      if (IsInteger32(operand_value)) {
4443        DeoptimizeIf(no_condition, instr->environment());
4444      }
4445    } else {
4446      if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4447        Register value = ToRegister(instr->value());
4448        __ test(value, Immediate(kSmiTagMask));
4449        DeoptimizeIf(zero, instr->environment());
4450      }
4451    }
4452  } else if (FLAG_track_double_fields && representation.IsDouble()) {
4453    ASSERT(transition.is_null());
4454    ASSERT(access.IsInobject());
4455    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4456    if (CpuFeatures::IsSupported(SSE2)) {
4457      CpuFeatureScope scope(masm(), SSE2);
4458      XMMRegister value = ToDoubleRegister(instr->value());
4459      __ movsd(FieldOperand(object, offset), value);
4460    } else {
4461      X87Register value = ToX87Register(instr->value());
4462      X87Mov(FieldOperand(object, offset), value);
4463    }
4464    return;
4465  }
4466
4467  if (!transition.is_null()) {
4468    if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
4469      __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
4470    } else {
4471      Register temp = ToRegister(instr->temp());
4472      Register temp_map = ToRegister(instr->temp_map());
4473      __ mov(temp_map, transition);
4474      __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
4475      // Update the write barrier for the map field.
4476      __ RecordWriteField(object,
4477                          HeapObject::kMapOffset,
4478                          temp_map,
4479                          temp,
4480                          GetSaveFPRegsMode(),
4481                          OMIT_REMEMBERED_SET,
4482                          OMIT_SMI_CHECK);
4483    }
4484  }
4485
4486  // Do the store.
4487  SmiCheck check_needed =
4488      instr->hydrogen()->value()->IsHeapObject()
4489          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4490
4491  Register write_register = object;
4492  if (!access.IsInobject()) {
4493    write_register = ToRegister(instr->temp());
4494    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
4495  }
4496
4497  MemOperand operand = FieldOperand(write_register, offset);
4498  if (instr->value()->IsConstantOperand()) {
4499    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4500    if (operand_value->IsRegister()) {
4501      Register value = ToRegister(operand_value);
4502      __ Store(value, operand, representation);
4503    } else if (representation.IsInteger32()) {
4504      Immediate immediate = ToImmediate(operand_value, representation);
4505      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4506      __ mov(operand, immediate);
4507    } else {
4508      Handle<Object> handle_value = ToHandle(operand_value);
4509      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4510      __ mov(operand, handle_value);
4511    }
4512  } else {
4513    Register value = ToRegister(instr->value());
4514    __ Store(value, operand, representation);
4515  }
4516
4517  if (instr->hydrogen()->NeedsWriteBarrier()) {
4518    Register value = ToRegister(instr->value());
4519    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
4520    // Update the write barrier for the object for in-object properties.
4521    __ RecordWriteField(write_register,
4522                        offset,
4523                        value,
4524                        temp,
4525                        GetSaveFPRegsMode(),
4526                        EMIT_REMEMBERED_SET,
4527                        check_needed);
4528  }
4529}
4530
4531
4532void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4533  ASSERT(ToRegister(instr->context()).is(esi));
4534  ASSERT(ToRegister(instr->object()).is(edx));
4535  ASSERT(ToRegister(instr->value()).is(eax));
4536
4537  __ mov(ecx, instr->name());
4538  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
4539      ? isolate()->builtins()->StoreIC_Initialize_Strict()
4540      : isolate()->builtins()->StoreIC_Initialize();
4541  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4542}
4543
4544
4545void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
4546  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4547    Label done;
4548    __ j(NegateCondition(cc), &done, Label::kNear);
4549    __ int3();
4550    __ bind(&done);
4551  } else {
4552    DeoptimizeIf(cc, check->environment());
4553  }
4554}
4555
4556
4557void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4558  if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return;
4559
4560  if (instr->index()->IsConstantOperand()) {
4561    Immediate immediate =
4562        ToImmediate(LConstantOperand::cast(instr->index()),
4563                    instr->hydrogen()->length()->representation());
4564    __ cmp(ToOperand(instr->length()), immediate);
4565    Condition condition =
4566        instr->hydrogen()->allow_equality() ? below : below_equal;
4567    ApplyCheckIf(condition, instr);
4568  } else {
4569    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
4570    Condition condition =
4571        instr->hydrogen()->allow_equality() ? above : above_equal;
4572    ApplyCheckIf(condition, instr);
4573  }
4574}
4575
4576
4577void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4578  ElementsKind elements_kind = instr->elements_kind();
4579  LOperand* key = instr->key();
4580  if (!key->IsConstantOperand() &&
4581      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
4582                                  elements_kind)) {
4583    __ SmiUntag(ToRegister(key));
4584  }
4585  Operand operand(BuildFastArrayOperand(
4586      instr->elements(),
4587      key,
4588      instr->hydrogen()->key()->representation(),
4589      elements_kind,
4590      0,
4591      instr->additional_index()));
4592  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
4593    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
4594      CpuFeatureScope scope(masm(), SSE2);
4595      XMMRegister xmm_scratch = double_scratch0();
4596      __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
4597      __ movss(operand, xmm_scratch);
4598    } else {
4599      __ fld(0);
4600      __ fstp_s(operand);
4601    }
4602  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
4603    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
4604      CpuFeatureScope scope(masm(), SSE2);
4605      __ movsd(operand, ToDoubleRegister(instr->value()));
4606    } else {
4607      X87Mov(operand, ToX87Register(instr->value()));
4608    }
4609  } else {
4610    Register value = ToRegister(instr->value());
4611    switch (elements_kind) {
4612      case EXTERNAL_PIXEL_ELEMENTS:
4613      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
4614      case EXTERNAL_BYTE_ELEMENTS:
4615        __ mov_b(operand, value);
4616        break;
4617      case EXTERNAL_SHORT_ELEMENTS:
4618      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
4619        __ mov_w(operand, value);
4620        break;
4621      case EXTERNAL_INT_ELEMENTS:
4622      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
4623        __ mov(operand, value);
4624        break;
4625      case EXTERNAL_FLOAT_ELEMENTS:
4626      case EXTERNAL_DOUBLE_ELEMENTS:
4627      case FAST_SMI_ELEMENTS:
4628      case FAST_ELEMENTS:
4629      case FAST_DOUBLE_ELEMENTS:
4630      case FAST_HOLEY_SMI_ELEMENTS:
4631      case FAST_HOLEY_ELEMENTS:
4632      case FAST_HOLEY_DOUBLE_ELEMENTS:
4633      case DICTIONARY_ELEMENTS:
4634      case NON_STRICT_ARGUMENTS_ELEMENTS:
4635        UNREACHABLE();
4636        break;
4637    }
4638  }
4639}
4640
4641
4642void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4643  ExternalReference canonical_nan_reference =
4644      ExternalReference::address_of_canonical_non_hole_nan();
4645  Operand double_store_operand = BuildFastArrayOperand(
4646      instr->elements(),
4647      instr->key(),
4648      instr->hydrogen()->key()->representation(),
4649      FAST_DOUBLE_ELEMENTS,
4650      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
4651      instr->additional_index());
4652
4653  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
4654    CpuFeatureScope scope(masm(), SSE2);
4655    XMMRegister value = ToDoubleRegister(instr->value());
4656
4657    if (instr->NeedsCanonicalization()) {
4658      Label have_value;
4659
4660      __ ucomisd(value, value);
4661      __ j(parity_odd, &have_value, Label::kNear);  // NaN.
4662
4663      __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
4664      __ bind(&have_value);
4665    }
4666
4667    __ movsd(double_store_operand, value);
4668  } else {
4669    // Can't use SSE2 in the serializer
4670    if (instr->hydrogen()->IsConstantHoleStore()) {
4671      // This means we should store the (double) hole. No floating point
4672      // registers required.
4673      double nan_double = FixedDoubleArray::hole_nan_as_double();
4674      uint64_t int_val = BitCast<uint64_t, double>(nan_double);
4675      int32_t lower = static_cast<int32_t>(int_val);
4676      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
4677
4678      __ mov(double_store_operand, Immediate(lower));
4679      Operand double_store_operand2 = BuildFastArrayOperand(
4680          instr->elements(),
4681          instr->key(),
4682          instr->hydrogen()->key()->representation(),
4683          FAST_DOUBLE_ELEMENTS,
4684          FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
4685          instr->additional_index());
4686      __ mov(double_store_operand2, Immediate(upper));
4687    } else {
4688      Label no_special_nan_handling;
4689      X87Register value = ToX87Register(instr->value());
4690      X87Fxch(value);
4691
4692      if (instr->NeedsCanonicalization()) {
4693        __ fld(0);
4694        __ fld(0);
4695        __ FCmp();
4696
4697        __ j(parity_odd, &no_special_nan_handling, Label::kNear);
4698        __ sub(esp, Immediate(kDoubleSize));
4699        __ fst_d(MemOperand(esp, 0));
4700        __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
4701               Immediate(kHoleNanUpper32));
4702        __ add(esp, Immediate(kDoubleSize));
4703        Label canonicalize;
4704        __ j(not_equal, &canonicalize, Label::kNear);
4705        __ jmp(&no_special_nan_handling, Label::kNear);
4706        __ bind(&canonicalize);
4707        __ fstp(0);
4708        __ fld_d(Operand::StaticVariable(canonical_nan_reference));
4709      }
4710
4711      __ bind(&no_special_nan_handling);
4712      __ fst_d(double_store_operand);
4713    }
4714  }
4715}
4716
4717
4718void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4719  Register elements = ToRegister(instr->elements());
4720  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4721
4722  Operand operand = BuildFastArrayOperand(
4723      instr->elements(),
4724      instr->key(),
4725      instr->hydrogen()->key()->representation(),
4726      FAST_ELEMENTS,
4727      FixedArray::kHeaderSize - kHeapObjectTag,
4728      instr->additional_index());
4729  if (instr->value()->IsRegister()) {
4730    __ mov(operand, ToRegister(instr->value()));
4731  } else {
4732    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4733    if (IsSmi(operand_value)) {
4734      Immediate immediate = ToImmediate(operand_value, Representation::Smi());
4735      __ mov(operand, immediate);
4736    } else {
4737      ASSERT(!IsInteger32(operand_value));
4738      Handle<Object> handle_value = ToHandle(operand_value);
4739      __ mov(operand, handle_value);
4740    }
4741  }
4742
4743  if (instr->hydrogen()->NeedsWriteBarrier()) {
4744    ASSERT(instr->value()->IsRegister());
4745    Register value = ToRegister(instr->value());
4746    ASSERT(!instr->key()->IsConstantOperand());
4747    SmiCheck check_needed =
4748        instr->hydrogen()->value()->IsHeapObject()
4749          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4750    // Compute address of modified element and store it into key register.
4751    __ lea(key, operand);
4752    __ RecordWrite(elements,
4753                   key,
4754                   value,
4755                   GetSaveFPRegsMode(),
4756                   EMIT_REMEMBERED_SET,
4757                   check_needed);
4758  }
4759}
4760
4761
4762void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4763  // By cases...external, fast-double, fast
4764  if (instr->is_external()) {
4765    DoStoreKeyedExternalArray(instr);
4766  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4767    DoStoreKeyedFixedDoubleArray(instr);
4768  } else {
4769    DoStoreKeyedFixedArray(instr);
4770  }
4771}
4772
4773
4774void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4775  ASSERT(ToRegister(instr->context()).is(esi));
4776  ASSERT(ToRegister(instr->object()).is(edx));
4777  ASSERT(ToRegister(instr->key()).is(ecx));
4778  ASSERT(ToRegister(instr->value()).is(eax));
4779
4780  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
4781      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4782      : isolate()->builtins()->KeyedStoreIC_Initialize();
4783  CallCode(ic, RelocInfo::CODE_TARGET, instr);
4784}
4785
4786
4787void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4788  Register object = ToRegister(instr->object());
4789  Register temp = ToRegister(instr->temp());
4790  Label no_memento_found;
4791  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4792  DeoptimizeIf(equal, instr->environment());
4793  __ bind(&no_memento_found);
4794}
4795
4796
4797void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4798  Register object_reg = ToRegister(instr->object());
4799
4800  Handle<Map> from_map = instr->original_map();
4801  Handle<Map> to_map = instr->transitioned_map();
4802  ElementsKind from_kind = instr->from_kind();
4803  ElementsKind to_kind = instr->to_kind();
4804
4805  Label not_applicable;
4806  bool is_simple_map_transition =
4807      IsSimpleMapChangeTransition(from_kind, to_kind);
4808  Label::Distance branch_distance =
4809      is_simple_map_transition ? Label::kNear : Label::kFar;
4810  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4811  __ j(not_equal, &not_applicable, branch_distance);
4812  if (is_simple_map_transition) {
4813    Register new_map_reg = ToRegister(instr->new_map_temp());
4814    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
4815           Immediate(to_map));
4816    // Write barrier.
4817    ASSERT_NE(instr->temp(), NULL);
4818    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4819                         ToRegister(instr->temp()),
4820                         kDontSaveFPRegs);
4821  } else {
4822    ASSERT(ToRegister(instr->context()).is(esi));
4823    PushSafepointRegistersScope scope(this);
4824    if (!object_reg.is(eax)) {
4825      __ mov(eax, object_reg);
4826    }
4827    __ mov(ebx, to_map);
4828    TransitionElementsKindStub stub(from_kind, to_kind);
4829    __ CallStub(&stub);
4830    RecordSafepointWithRegisters(
4831        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4832  }
4833  __ bind(&not_applicable);
4834}
4835
4836
4837void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4838  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4839   public:
4840    DeferredStringCharCodeAt(LCodeGen* codegen,
4841                             LStringCharCodeAt* instr,
4842                             const X87Stack& x87_stack)
4843        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4844    virtual void Generate() V8_OVERRIDE {
4845      codegen()->DoDeferredStringCharCodeAt(instr_);
4846    }
4847    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4848   private:
4849    LStringCharCodeAt* instr_;
4850  };
4851
4852  DeferredStringCharCodeAt* deferred =
4853      new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
4854
4855  StringCharLoadGenerator::Generate(masm(),
4856                                    factory(),
4857                                    ToRegister(instr->string()),
4858                                    ToRegister(instr->index()),
4859                                    ToRegister(instr->result()),
4860                                    deferred->entry());
4861  __ bind(deferred->exit());
4862}
4863
4864
4865void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4866  Register string = ToRegister(instr->string());
4867  Register result = ToRegister(instr->result());
4868
4869  // TODO(3095996): Get rid of this. For now, we need to make the
4870  // result register contain a valid pointer because it is already
4871  // contained in the register pointer map.
4872  __ Set(result, Immediate(0));
4873
4874  PushSafepointRegistersScope scope(this);
4875  __ push(string);
4876  // Push the index as a smi. This is safe because of the checks in
4877  // DoStringCharCodeAt above.
4878  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4879  if (instr->index()->IsConstantOperand()) {
4880    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
4881                                      Representation::Smi());
4882    __ push(immediate);
4883  } else {
4884    Register index = ToRegister(instr->index());
4885    __ SmiTag(index);
4886    __ push(index);
4887  }
4888  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
4889                          instr, instr->context());
4890  __ AssertSmi(eax);
4891  __ SmiUntag(eax);
4892  __ StoreToSafepointRegisterSlot(result, eax);
4893}
4894
4895
4896void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4897  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
4898   public:
4899    DeferredStringCharFromCode(LCodeGen* codegen,
4900                               LStringCharFromCode* instr,
4901                               const X87Stack& x87_stack)
4902        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
4903    virtual void Generate() V8_OVERRIDE {
4904      codegen()->DoDeferredStringCharFromCode(instr_);
4905    }
4906    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4907   private:
4908    LStringCharFromCode* instr_;
4909  };
4910
4911  DeferredStringCharFromCode* deferred =
4912      new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
4913
4914  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4915  Register char_code = ToRegister(instr->char_code());
4916  Register result = ToRegister(instr->result());
4917  ASSERT(!char_code.is(result));
4918
4919  __ cmp(char_code, String::kMaxOneByteCharCode);
4920  __ j(above, deferred->entry());
4921  __ Set(result, Immediate(factory()->single_character_string_cache()));
4922  __ mov(result, FieldOperand(result,
4923                              char_code, times_pointer_size,
4924                              FixedArray::kHeaderSize));
4925  __ cmp(result, factory()->undefined_value());
4926  __ j(equal, deferred->entry());
4927  __ bind(deferred->exit());
4928}
4929
4930
4931void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4932  Register char_code = ToRegister(instr->char_code());
4933  Register result = ToRegister(instr->result());
4934
4935  // TODO(3095996): Get rid of this. For now, we need to make the
4936  // result register contain a valid pointer because it is already
4937  // contained in the register pointer map.
4938  __ Set(result, Immediate(0));
4939
4940  PushSafepointRegistersScope scope(this);
4941  __ SmiTag(char_code);
4942  __ push(char_code);
4943  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4944  __ StoreToSafepointRegisterSlot(result, eax);
4945}
4946
4947
4948void LCodeGen::DoStringAdd(LStringAdd* instr) {
4949  ASSERT(ToRegister(instr->context()).is(esi));
4950  if (FLAG_new_string_add) {
4951    ASSERT(ToRegister(instr->left()).is(edx));
4952    ASSERT(ToRegister(instr->right()).is(eax));
4953    NewStringAddStub stub(instr->hydrogen()->flags(),
4954                          isolate()->heap()->GetPretenureMode());
4955    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4956  } else {
4957    EmitPushTaggedOperand(instr->left());
4958    EmitPushTaggedOperand(instr->right());
4959    StringAddStub stub(instr->hydrogen()->flags());
4960    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4961  }
4962}
4963
4964
4965void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4966  LOperand* input = instr->value();
4967  LOperand* output = instr->result();
4968  ASSERT(input->IsRegister() || input->IsStackSlot());
4969  ASSERT(output->IsDoubleRegister());
4970  if (CpuFeatures::IsSupported(SSE2)) {
4971    CpuFeatureScope scope(masm(), SSE2);
4972    __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4973  } else if (input->IsRegister()) {
4974    Register input_reg = ToRegister(input);
4975    __ push(input_reg);
4976    X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
4977    __ pop(input_reg);
4978  } else {
4979    X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
4980  }
4981}
4982
4983
4984void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
4985  Register input = ToRegister(instr->value());
4986  __ SmiTag(input);
4987  if (!instr->hydrogen()->value()->HasRange() ||
4988      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
4989    DeoptimizeIf(overflow, instr->environment());
4990  }
4991}
4992
4993
4994void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4995  LOperand* input = instr->value();
4996  LOperand* output = instr->result();
4997  if (CpuFeatures::IsSupported(SSE2)) {
4998    CpuFeatureScope scope(masm(), SSE2);
4999    LOperand* temp = instr->temp();
5000
5001    __ LoadUint32(ToDoubleRegister(output),
5002                  ToRegister(input),
5003                  ToDoubleRegister(temp));
5004  } else {
5005    X87Register res = ToX87Register(output);
5006    X87PrepareToWrite(res);
5007    __ LoadUint32NoSSE2(ToRegister(input));
5008    X87CommitWrite(res);
5009  }
5010}
5011
5012
5013void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
5014  Register input = ToRegister(instr->value());
5015  if (!instr->hydrogen()->value()->HasRange() ||
5016      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
5017    __ test(input, Immediate(0xc0000000));
5018    DeoptimizeIf(not_zero, instr->environment());
5019  }
5020  __ SmiTag(input);
5021}
5022
5023
5024void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
5025  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
5026   public:
5027    DeferredNumberTagI(LCodeGen* codegen,
5028                       LNumberTagI* instr,
5029                       const X87Stack& x87_stack)
5030        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5031    virtual void Generate() V8_OVERRIDE {
5032      codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
5033    }
5034    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5035   private:
5036    LNumberTagI* instr_;
5037  };
5038
5039  LOperand* input = instr->value();
5040  ASSERT(input->IsRegister() && input->Equals(instr->result()));
5041  Register reg = ToRegister(input);
5042
5043  DeferredNumberTagI* deferred =
5044      new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
5045  __ SmiTag(reg);
5046  __ j(overflow, deferred->entry());
5047  __ bind(deferred->exit());
5048}
5049
5050
5051void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
5052  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
5053   public:
5054    DeferredNumberTagU(LCodeGen* codegen,
5055                       LNumberTagU* instr,
5056                       const X87Stack& x87_stack)
5057        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5058    virtual void Generate() V8_OVERRIDE {
5059      codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
5060    }
5061    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5062   private:
5063    LNumberTagU* instr_;
5064  };
5065
5066  LOperand* input = instr->value();
5067  ASSERT(input->IsRegister() && input->Equals(instr->result()));
5068  Register reg = ToRegister(input);
5069
5070  DeferredNumberTagU* deferred =
5071      new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
5072  __ cmp(reg, Immediate(Smi::kMaxValue));
5073  __ j(above, deferred->entry());
5074  __ SmiTag(reg);
5075  __ bind(deferred->exit());
5076}
5077
5078
5079void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
5080                                    LOperand* value,
5081                                    IntegerSignedness signedness) {
5082  Label slow;
5083  Register reg = ToRegister(value);
5084  Register tmp = reg.is(eax) ? ecx : eax;
5085  XMMRegister xmm_scratch = double_scratch0();
5086
5087  // Preserve the value of all registers.
5088  PushSafepointRegistersScope scope(this);
5089
5090  Label done;
5091
5092  if (signedness == SIGNED_INT32) {
5093    // There was overflow, so bits 30 and 31 of the original integer
5094    // disagree. Try to allocate a heap number in new space and store
5095    // the value in there. If that fails, call the runtime system.
5096    __ SmiUntag(reg);
5097    __ xor_(reg, 0x80000000);
5098    if (CpuFeatures::IsSupported(SSE2)) {
5099      CpuFeatureScope feature_scope(masm(), SSE2);
5100      __ Cvtsi2sd(xmm_scratch, Operand(reg));
5101    } else {
5102      __ push(reg);
5103      __ fild_s(Operand(esp, 0));
5104      __ pop(reg);
5105    }
5106  } else {
5107    if (CpuFeatures::IsSupported(SSE2)) {
5108      CpuFeatureScope feature_scope(masm(), SSE2);
5109      __ LoadUint32(xmm_scratch, reg,
5110                    ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
5111    } else {
5112      // There's no fild variant for unsigned values, so zero-extend to a 64-bit
5113      // int manually.
5114      __ push(Immediate(0));
5115      __ push(reg);
5116      __ fild_d(Operand(esp, 0));
5117      __ pop(reg);
5118      __ pop(reg);
5119    }
5120  }
5121
5122  if (FLAG_inline_new) {
5123    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
5124    __ jmp(&done, Label::kNear);
5125  }
5126
5127  // Slow case: Call the runtime system to do the number allocation.
5128  __ bind(&slow);
5129
5130  // TODO(3095996): Put a valid pointer value in the stack slot where the result
5131  // register is stored, as this register is in the pointer map, but contains an
5132  // integer value.
5133  __ StoreToSafepointRegisterSlot(reg, Immediate(0));
5134  // NumberTagI and NumberTagD use the context from the frame, rather than
5135  // the environment's HContext or HInlinedContext value.
5136  // They only call Runtime::kAllocateHeapNumber.
5137  // The corresponding HChange instructions are added in a phase that does
5138  // not have easy access to the local context.
5139  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5140  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
5141  RecordSafepointWithRegisters(
5142      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5143  if (!reg.is(eax)) __ mov(reg, eax);
5144
5145  // Done. Put the value in xmm_scratch into the value of the allocated heap
5146  // number.
5147  __ bind(&done);
5148  if (CpuFeatures::IsSupported(SSE2)) {
5149    CpuFeatureScope feature_scope(masm(), SSE2);
5150    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
5151  } else {
5152    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
5153  }
5154  __ StoreToSafepointRegisterSlot(reg, reg);
5155}
5156
5157
5158void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
5159  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
5160   public:
5161    DeferredNumberTagD(LCodeGen* codegen,
5162                       LNumberTagD* instr,
5163                       const X87Stack& x87_stack)
5164        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5165    virtual void Generate() V8_OVERRIDE {
5166      codegen()->DoDeferredNumberTagD(instr_);
5167    }
5168    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5169   private:
5170    LNumberTagD* instr_;
5171  };
5172
5173  Register reg = ToRegister(instr->result());
5174
5175  bool use_sse2 = CpuFeatures::IsSupported(SSE2);
5176  if (!use_sse2) {
5177    // Put the value to the top of stack
5178    X87Register src = ToX87Register(instr->value());
5179    X87LoadForUsage(src);
5180  }
5181
5182  DeferredNumberTagD* deferred =
5183      new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
5184  if (FLAG_inline_new) {
5185    Register tmp = ToRegister(instr->temp());
5186    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
5187  } else {
5188    __ jmp(deferred->entry());
5189  }
5190  __ bind(deferred->exit());
5191  if (use_sse2) {
5192    CpuFeatureScope scope(masm(), SSE2);
5193    XMMRegister input_reg = ToDoubleRegister(instr->value());
5194    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
5195  } else {
5196    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
5197  }
5198}
5199
5200
5201void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
5202  // TODO(3095996): Get rid of this. For now, we need to make the
5203  // result register contain a valid pointer because it is already
5204  // contained in the register pointer map.
5205  Register reg = ToRegister(instr->result());
5206  __ Set(reg, Immediate(0));
5207
5208  PushSafepointRegistersScope scope(this);
5209  // NumberTagI and NumberTagD use the context from the frame, rather than
5210  // the environment's HContext or HInlinedContext value.
5211  // They only call Runtime::kAllocateHeapNumber.
5212  // The corresponding HChange instructions are added in a phase that does
5213  // not have easy access to the local context.
5214  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
5215  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
5216  RecordSafepointWithRegisters(
5217      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5218  __ StoreToSafepointRegisterSlot(reg, eax);
5219}
5220
5221
5222void LCodeGen::DoSmiTag(LSmiTag* instr) {
5223  LOperand* input = instr->value();
5224  ASSERT(input->IsRegister() && input->Equals(instr->result()));
5225  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
5226  __ SmiTag(ToRegister(input));
5227}
5228
5229
5230void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5231  LOperand* input = instr->value();
5232  Register result = ToRegister(input);
5233  ASSERT(input->IsRegister() && input->Equals(instr->result()));
5234  if (instr->needs_check()) {
5235    __ test(result, Immediate(kSmiTagMask));
5236    DeoptimizeIf(not_zero, instr->environment());
5237  } else {
5238    __ AssertSmi(result);
5239  }
5240  __ SmiUntag(result);
5241}
5242
5243
5244void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
5245                                      Register temp_reg,
5246                                      X87Register res_reg,
5247                                      bool can_convert_undefined_to_nan,
5248                                      bool deoptimize_on_minus_zero,
5249                                      LEnvironment* env,
5250                                      NumberUntagDMode mode) {
5251  Label load_smi, done;
5252
5253  X87PrepareToWrite(res_reg);
5254  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5255    // Smi check.
5256    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5257
5258    // Heap number map check.
5259    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5260           factory()->heap_number_map());
5261    if (!can_convert_undefined_to_nan) {
5262      DeoptimizeIf(not_equal, env);
5263    } else {
5264      Label heap_number, convert;
5265      __ j(equal, &heap_number, Label::kNear);
5266
5267      // Convert undefined (or hole) to NaN.
5268      __ cmp(input_reg, factory()->undefined_value());
5269      DeoptimizeIf(not_equal, env);
5270
5271      __ bind(&convert);
5272      ExternalReference nan =
5273          ExternalReference::address_of_canonical_non_hole_nan();
5274      __ fld_d(Operand::StaticVariable(nan));
5275      __ jmp(&done, Label::kNear);
5276
5277      __ bind(&heap_number);
5278    }
5279    // Heap number to x87 conversion.
5280    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5281    if (deoptimize_on_minus_zero) {
5282      __ fldz();
5283      __ FCmp();
5284      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
5285      __ j(not_zero, &done, Label::kNear);
5286
5287      // Use general purpose registers to check if we have -0.0
5288      __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5289      __ test(temp_reg, Immediate(HeapNumber::kSignMask));
5290      __ j(zero, &done, Label::kNear);
5291
5292      // Pop FPU stack before deoptimizing.
5293      __ fstp(0);
5294      DeoptimizeIf(not_zero, env);
5295    }
5296    __ jmp(&done, Label::kNear);
5297  } else {
5298    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
5299  }
5300
5301  __ bind(&load_smi);
5302  // Clobbering a temp is faster than re-tagging the
5303  // input register since we avoid dependencies.
5304  __ mov(temp_reg, input_reg);
5305  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
5306  __ push(temp_reg);
5307  __ fild_s(Operand(esp, 0));
5308  __ add(esp, Immediate(kPointerSize));
5309  __ bind(&done);
5310  X87CommitWrite(res_reg);
5311}
5312
5313
5314void LCodeGen::EmitNumberUntagD(Register input_reg,
5315                                Register temp_reg,
5316                                XMMRegister result_reg,
5317                                bool can_convert_undefined_to_nan,
5318                                bool deoptimize_on_minus_zero,
5319                                LEnvironment* env,
5320                                NumberUntagDMode mode) {
5321  Label convert, load_smi, done;
5322
5323  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5324    // Smi check.
5325    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
5326
5327    // Heap number map check.
5328    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5329           factory()->heap_number_map());
5330    if (can_convert_undefined_to_nan) {
5331      __ j(not_equal, &convert, Label::kNear);
5332    } else {
5333      DeoptimizeIf(not_equal, env);
5334    }
5335
5336    // Heap number to XMM conversion.
5337    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
5338
5339    if (deoptimize_on_minus_zero) {
5340      XMMRegister xmm_scratch = double_scratch0();
5341      __ xorps(xmm_scratch, xmm_scratch);
5342      __ ucomisd(result_reg, xmm_scratch);
5343      __ j(not_zero, &done, Label::kNear);
5344      __ movmskpd(temp_reg, result_reg);
5345      __ test_b(temp_reg, 1);
5346      DeoptimizeIf(not_zero, env);
5347    }
5348    __ jmp(&done, Label::kNear);
5349
5350    if (can_convert_undefined_to_nan) {
5351      __ bind(&convert);
5352
5353      // Convert undefined (and hole) to NaN.
5354      __ cmp(input_reg, factory()->undefined_value());
5355      DeoptimizeIf(not_equal, env);
5356
5357      ExternalReference nan =
5358          ExternalReference::address_of_canonical_non_hole_nan();
5359      __ movsd(result_reg, Operand::StaticVariable(nan));
5360      __ jmp(&done, Label::kNear);
5361    }
5362  } else {
5363    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
5364  }
5365
5366  __ bind(&load_smi);
5367  // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
5368  // input register since we avoid dependencies.
5369  __ mov(temp_reg, input_reg);
5370  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
5371  __ Cvtsi2sd(result_reg, Operand(temp_reg));
5372  __ bind(&done);
5373}
5374
5375
5376void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
5377  Register input_reg = ToRegister(instr->value());
5378
5379  if (instr->truncating()) {
5380    Label no_heap_number, check_bools, check_false;
5381
5382    // Heap number map check.
5383    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5384           factory()->heap_number_map());
5385    __ j(not_equal, &no_heap_number, Label::kNear);
5386    __ TruncateHeapNumberToI(input_reg, input_reg);
5387    __ jmp(done);
5388
5389    __ bind(&no_heap_number);
5390    // Check for Oddballs. Undefined/False is converted to zero and True to one
5391    // for truncating conversions.
5392    __ cmp(input_reg, factory()->undefined_value());
5393    __ j(not_equal, &check_bools, Label::kNear);
5394    __ Set(input_reg, Immediate(0));
5395    __ jmp(done);
5396
5397    __ bind(&check_bools);
5398    __ cmp(input_reg, factory()->true_value());
5399    __ j(not_equal, &check_false, Label::kNear);
5400    __ Set(input_reg, Immediate(1));
5401    __ jmp(done);
5402
5403    __ bind(&check_false);
5404    __ cmp(input_reg, factory()->false_value());
5405    __ RecordComment("Deferred TaggedToI: cannot truncate");
5406    DeoptimizeIf(not_equal, instr->environment());
5407    __ Set(input_reg, Immediate(0));
5408    __ jmp(done);
5409  } else {
5410    Label bailout;
5411    XMMRegister scratch = (instr->temp() != NULL)
5412        ? ToDoubleRegister(instr->temp())
5413        : no_xmm_reg;
5414    __ TaggedToI(input_reg, input_reg, scratch,
5415                 instr->hydrogen()->GetMinusZeroMode(), &bailout);
5416    __ jmp(done);
5417    __ bind(&bailout);
5418    DeoptimizeIf(no_condition, instr->environment());
5419  }
5420}
5421
5422
5423void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5424  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
5425   public:
5426    DeferredTaggedToI(LCodeGen* codegen,
5427                      LTaggedToI* instr,
5428                      const X87Stack& x87_stack)
5429        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5430    virtual void Generate() V8_OVERRIDE {
5431      codegen()->DoDeferredTaggedToI(instr_, done());
5432    }
5433    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5434   private:
5435    LTaggedToI* instr_;
5436  };
5437
5438  LOperand* input = instr->value();
5439  ASSERT(input->IsRegister());
5440  Register input_reg = ToRegister(input);
5441  ASSERT(input_reg.is(ToRegister(instr->result())));
5442
5443  if (instr->hydrogen()->value()->representation().IsSmi()) {
5444    __ SmiUntag(input_reg);
5445  } else {
5446    DeferredTaggedToI* deferred =
5447        new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
5448
5449    __ JumpIfNotSmi(input_reg, deferred->entry());
5450    __ SmiUntag(input_reg);
5451    __ bind(deferred->exit());
5452  }
5453}
5454
5455
5456void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5457  LOperand* input = instr->value();
5458  ASSERT(input->IsRegister());
5459  LOperand* temp = instr->temp();
5460  ASSERT(temp->IsRegister());
5461  LOperand* result = instr->result();
5462  ASSERT(result->IsDoubleRegister());
5463
5464  Register input_reg = ToRegister(input);
5465  bool deoptimize_on_minus_zero =
5466      instr->hydrogen()->deoptimize_on_minus_zero();
5467  Register temp_reg = ToRegister(temp);
5468
5469  HValue* value = instr->hydrogen()->value();
5470  NumberUntagDMode mode = value->representation().IsSmi()
5471      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5472
5473  if (CpuFeatures::IsSupported(SSE2)) {
5474    CpuFeatureScope scope(masm(), SSE2);
5475    XMMRegister result_reg = ToDoubleRegister(result);
5476    EmitNumberUntagD(input_reg,
5477                     temp_reg,
5478                     result_reg,
5479                     instr->hydrogen()->can_convert_undefined_to_nan(),
5480                     deoptimize_on_minus_zero,
5481                     instr->environment(),
5482                     mode);
5483  } else {
5484    EmitNumberUntagDNoSSE2(input_reg,
5485                           temp_reg,
5486                           ToX87Register(instr->result()),
5487                           instr->hydrogen()->can_convert_undefined_to_nan(),
5488                           deoptimize_on_minus_zero,
5489                           instr->environment(),
5490                           mode);
5491  }
5492}
5493
5494
5495void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5496  LOperand* input = instr->value();
5497  ASSERT(input->IsDoubleRegister());
5498  LOperand* result = instr->result();
5499  ASSERT(result->IsRegister());
5500  Register result_reg = ToRegister(result);
5501
5502  if (instr->truncating()) {
5503    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
5504      CpuFeatureScope scope(masm(), SSE2);
5505      XMMRegister input_reg = ToDoubleRegister(input);
5506      __ TruncateDoubleToI(result_reg, input_reg);
5507    } else {
5508      X87Register input_reg = ToX87Register(input);
5509      X87Fxch(input_reg);
5510      __ TruncateX87TOSToI(result_reg);
5511    }
5512  } else {
5513    Label bailout, done;
5514    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
5515      CpuFeatureScope scope(masm(), SSE2);
5516      XMMRegister input_reg = ToDoubleRegister(input);
5517      XMMRegister xmm_scratch = double_scratch0();
5518       __ DoubleToI(result_reg, input_reg, xmm_scratch,
5519           instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5520    } else {
5521      X87Register input_reg = ToX87Register(input);
5522      X87Fxch(input_reg);
5523      __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5524                   &bailout, Label::kNear);
5525    }
5526    __ jmp(&done, Label::kNear);
5527    __ bind(&bailout);
5528    DeoptimizeIf(no_condition, instr->environment());
5529    __ bind(&done);
5530  }
5531}
5532
5533
5534void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5535  LOperand* input = instr->value();
5536  ASSERT(input->IsDoubleRegister());
5537  LOperand* result = instr->result();
5538  ASSERT(result->IsRegister());
5539  Register result_reg = ToRegister(result);
5540
5541  Label bailout, done;
5542  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
5543    CpuFeatureScope scope(masm(), SSE2);
5544    XMMRegister input_reg = ToDoubleRegister(input);
5545    XMMRegister xmm_scratch = double_scratch0();
5546    __ DoubleToI(result_reg, input_reg, xmm_scratch,
5547        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
5548  } else {
5549    X87Register input_reg = ToX87Register(input);
5550    X87Fxch(input_reg);
5551    __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
5552        &bailout, Label::kNear);
5553  }
5554  __ jmp(&done, Label::kNear);
5555  __ bind(&bailout);
5556  DeoptimizeIf(no_condition, instr->environment());
5557  __ bind(&done);
5558
5559  __ SmiTag(result_reg);
5560  DeoptimizeIf(overflow, instr->environment());
5561}
5562
5563
5564void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5565  LOperand* input = instr->value();
5566  __ test(ToOperand(input), Immediate(kSmiTagMask));
5567  DeoptimizeIf(not_zero, instr->environment());
5568}
5569
5570
5571void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5572  if (!instr->hydrogen()->value()->IsHeapObject()) {
5573    LOperand* input = instr->value();
5574    __ test(ToOperand(input), Immediate(kSmiTagMask));
5575    DeoptimizeIf(zero, instr->environment());
5576  }
5577}
5578
5579
5580void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5581  Register input = ToRegister(instr->value());
5582  Register temp = ToRegister(instr->temp());
5583
5584  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
5585
5586  if (instr->hydrogen()->is_interval_check()) {
5587    InstanceType first;
5588    InstanceType last;
5589    instr->hydrogen()->GetCheckInterval(&first, &last);
5590
5591    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5592            static_cast<int8_t>(first));
5593
5594    // If there is only one type in the interval check for equality.
5595    if (first == last) {
5596      DeoptimizeIf(not_equal, instr->environment());
5597    } else {
5598      DeoptimizeIf(below, instr->environment());
5599      // Omit check for the last type.
5600      if (last != LAST_TYPE) {
5601        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
5602                static_cast<int8_t>(last));
5603        DeoptimizeIf(above, instr->environment());
5604      }
5605    }
5606  } else {
5607    uint8_t mask;
5608    uint8_t tag;
5609    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5610
5611    if (IsPowerOf2(mask)) {
5612      ASSERT(tag == 0 || IsPowerOf2(tag));
5613      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
5614      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
5615    } else {
5616      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
5617      __ and_(temp, mask);
5618      __ cmp(temp, tag);
5619      DeoptimizeIf(not_equal, instr->environment());
5620    }
5621  }
5622}
5623
5624
5625void LCodeGen::DoCheckValue(LCheckValue* instr) {
5626  Handle<HeapObject> object = instr->hydrogen()->object().handle();
5627  if (instr->hydrogen()->object_in_new_space()) {
5628    Register reg = ToRegister(instr->value());
5629    Handle<Cell> cell = isolate()->factory()->NewCell(object);
5630    __ cmp(reg, Operand::ForCell(cell));
5631  } else {
5632    Operand operand = ToOperand(instr->value());
5633    __ cmp(operand, object);
5634  }
5635  DeoptimizeIf(not_equal, instr->environment());
5636}
5637
5638
5639void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5640  {
5641    PushSafepointRegistersScope scope(this);
5642    __ push(object);
5643    __ xor_(esi, esi);
5644    __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
5645    RecordSafepointWithRegisters(
5646        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5647
5648    __ test(eax, Immediate(kSmiTagMask));
5649  }
5650  DeoptimizeIf(zero, instr->environment());
5651}
5652
5653
5654void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5655  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
5656   public:
5657    DeferredCheckMaps(LCodeGen* codegen,
5658                      LCheckMaps* instr,
5659                      Register object,
5660                      const X87Stack& x87_stack)
5661        : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
5662      SetExit(check_maps());
5663    }
5664    virtual void Generate() V8_OVERRIDE {
5665      codegen()->DoDeferredInstanceMigration(instr_, object_);
5666    }
5667    Label* check_maps() { return &check_maps_; }
5668    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5669   private:
5670    LCheckMaps* instr_;
5671    Label check_maps_;
5672    Register object_;
5673  };
5674
5675  if (instr->hydrogen()->CanOmitMapChecks()) return;
5676
5677  LOperand* input = instr->value();
5678  ASSERT(input->IsRegister());
5679  Register reg = ToRegister(input);
5680
5681  DeferredCheckMaps* deferred = NULL;
5682  if (instr->hydrogen()->has_migration_target()) {
5683    deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
5684    __ bind(deferred->check_maps());
5685  }
5686
5687  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
5688  Label success;
5689  for (int i = 0; i < map_set.size() - 1; i++) {
5690    Handle<Map> map = map_set.at(i).handle();
5691    __ CompareMap(reg, map);
5692    __ j(equal, &success, Label::kNear);
5693  }
5694
5695  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
5696  __ CompareMap(reg, map);
5697  if (instr->hydrogen()->has_migration_target()) {
5698    __ j(not_equal, deferred->entry());
5699  } else {
5700    DeoptimizeIf(not_equal, instr->environment());
5701  }
5702
5703  __ bind(&success);
5704}
5705
5706
5707void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5708  CpuFeatureScope scope(masm(), SSE2);
5709  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5710  XMMRegister xmm_scratch = double_scratch0();
5711  Register result_reg = ToRegister(instr->result());
5712  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5713}
5714
5715
5716void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5717  ASSERT(instr->unclamped()->Equals(instr->result()));
5718  Register value_reg = ToRegister(instr->result());
5719  __ ClampUint8(value_reg);
5720}
5721
5722
5723void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5724  CpuFeatureScope scope(masm(), SSE2);
5725
5726  ASSERT(instr->unclamped()->Equals(instr->result()));
5727  Register input_reg = ToRegister(instr->unclamped());
5728  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
5729  XMMRegister xmm_scratch = double_scratch0();
5730  Label is_smi, done, heap_number;
5731
5732  __ JumpIfSmi(input_reg, &is_smi);
5733
5734  // Check for heap number
5735  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5736         factory()->heap_number_map());
5737  __ j(equal, &heap_number, Label::kNear);
5738
5739  // Check for undefined. Undefined is converted to zero for clamping
5740  // conversions.
5741  __ cmp(input_reg, factory()->undefined_value());
5742  DeoptimizeIf(not_equal, instr->environment());
5743  __ mov(input_reg, 0);
5744  __ jmp(&done, Label::kNear);
5745
5746  // Heap number
5747  __ bind(&heap_number);
5748  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5749  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5750  __ jmp(&done, Label::kNear);
5751
5752  // smi
5753  __ bind(&is_smi);
5754  __ SmiUntag(input_reg);
5755  __ ClampUint8(input_reg);
5756  __ bind(&done);
5757}
5758
5759
5760void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
5761  Register input_reg = ToRegister(instr->unclamped());
5762  Register result_reg = ToRegister(instr->result());
5763  Register scratch = ToRegister(instr->scratch());
5764  Register scratch2 = ToRegister(instr->scratch2());
5765  Register scratch3 = ToRegister(instr->scratch3());
5766  Label is_smi, done, heap_number, valid_exponent,
5767      largest_value, zero_result, maybe_nan_or_infinity;
5768
5769  __ JumpIfSmi(input_reg, &is_smi);
5770
5771  // Check for heap number
5772  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5773         factory()->heap_number_map());
5774  __ j(equal, &heap_number, Label::kNear);
5775
5776  // Check for undefined. Undefined is converted to zero for clamping
5777  // conversions.
5778  __ cmp(input_reg, factory()->undefined_value());
5779  DeoptimizeIf(not_equal, instr->environment());
5780  __ jmp(&zero_result, Label::kNear);
5781
5782  // Heap number
5783  __ bind(&heap_number);
5784
5785  // Surprisingly, all of the hand-crafted bit-manipulations below are much
5786  // faster than the x86 FPU built-in instruction, especially since "banker's
5787  // rounding" would be additionally very expensive
5788
5789  // Get exponent word.
5790  __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5791  __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5792
5793  // Test for negative values --> clamp to zero
5794  __ test(scratch, scratch);
5795  __ j(negative, &zero_result, Label::kNear);
5796
5797  // Get exponent alone in scratch2.
5798  __ mov(scratch2, scratch);
5799  __ and_(scratch2, HeapNumber::kExponentMask);
5800  __ shr(scratch2, HeapNumber::kExponentShift);
5801  __ j(zero, &zero_result, Label::kNear);
5802  __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
5803  __ j(negative, &zero_result, Label::kNear);
5804
5805  const uint32_t non_int8_exponent = 7;
5806  __ cmp(scratch2, Immediate(non_int8_exponent + 1));
5807  // If the exponent is too big, check for special values.
5808  __ j(greater, &maybe_nan_or_infinity, Label::kNear);
5809
5810  __ bind(&valid_exponent);
5811  // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
5812  // < 7. The shift bias is the number of bits to shift the mantissa such that
5813  // with an exponent of 7 such the that top-most one is in bit 30, allowing
5814  // detection the rounding overflow of a 255.5 to 256 (bit 31 goes from 0 to
5815  // 1).
5816  int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
5817  __ lea(result_reg, MemOperand(scratch2, shift_bias));
5818  // Here result_reg (ecx) is the shift, scratch is the exponent word.  Get the
5819  // top bits of the mantissa.
5820  __ and_(scratch, HeapNumber::kMantissaMask);
5821  // Put back the implicit 1 of the mantissa
5822  __ or_(scratch, 1 << HeapNumber::kExponentShift);
5823  // Shift up to round
5824  __ shl_cl(scratch);
5825  // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
5826  // use the bit in the "ones" place and add it to the "halves" place, which has
5827  // the effect of rounding to even.
5828  __ mov(scratch2, scratch);
5829  const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
5830  const uint32_t one_bit_shift = one_half_bit_shift + 1;
5831  __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
5832  __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
5833  Label no_round;
5834  __ j(less, &no_round, Label::kNear);
5835  Label round_up;
5836  __ mov(scratch2, Immediate(1 << one_half_bit_shift));
5837  __ j(greater, &round_up, Label::kNear);
5838  __ test(scratch3, scratch3);
5839  __ j(not_zero, &round_up, Label::kNear);
5840  __ mov(scratch2, scratch);
5841  __ and_(scratch2, Immediate(1 << one_bit_shift));
5842  __ shr(scratch2, 1);
5843  __ bind(&round_up);
5844  __ add(scratch, scratch2);
5845  __ j(overflow, &largest_value, Label::kNear);
5846  __ bind(&no_round);
5847  __ shr(scratch, 23);
5848  __ mov(result_reg, scratch);
5849  __ jmp(&done, Label::kNear);
5850
5851  __ bind(&maybe_nan_or_infinity);
5852  // Check for NaN/Infinity, all other values map to 255
5853  __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
5854  __ j(not_equal, &largest_value, Label::kNear);
5855
5856  // Check for NaN, which differs from Infinity in that at least one mantissa
5857  // bit is set.
5858  __ and_(scratch, HeapNumber::kMantissaMask);
5859  __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5860  __ j(not_zero, &zero_result, Label::kNear);  // M!=0 --> NaN
5861  // Infinity -> Fall through to map to 255.
5862
5863  __ bind(&largest_value);
5864  __ mov(result_reg, Immediate(255));
5865  __ jmp(&done, Label::kNear);
5866
5867  __ bind(&zero_result);
5868  __ xor_(result_reg, result_reg);
5869  __ jmp(&done, Label::kNear);
5870
5871  // smi
5872  __ bind(&is_smi);
5873  if (!input_reg.is(result_reg)) {
5874    __ mov(result_reg, input_reg);
5875  }
5876  __ SmiUntag(result_reg);
5877  __ ClampUint8(result_reg);
5878  __ bind(&done);
5879}
5880
5881
5882void LCodeGen::DoAllocate(LAllocate* instr) {
5883  class DeferredAllocate V8_FINAL : public LDeferredCode {
5884   public:
5885    DeferredAllocate(LCodeGen* codegen,
5886                     LAllocate* instr,
5887                     const X87Stack& x87_stack)
5888        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
5889    virtual void Generate() V8_OVERRIDE {
5890      codegen()->DoDeferredAllocate(instr_);
5891    }
5892    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5893   private:
5894    LAllocate* instr_;
5895  };
5896
5897  DeferredAllocate* deferred =
5898      new(zone()) DeferredAllocate(this, instr, x87_stack_);
5899
5900  Register result = ToRegister(instr->result());
5901  Register temp = ToRegister(instr->temp());
5902
5903  // Allocate memory for the object.
5904  AllocationFlags flags = TAG_OBJECT;
5905  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5906    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5907  }
5908  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5909    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5910    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5911    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5912  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5913    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5914    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5915  }
5916
5917  if (instr->size()->IsConstantOperand()) {
5918    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5919    if (size <= Page::kMaxRegularHeapObjectSize) {
5920      __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5921    } else {
5922      __ jmp(deferred->entry());
5923    }
5924  } else {
5925    Register size = ToRegister(instr->size());
5926    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
5927  }
5928
5929  __ bind(deferred->exit());
5930
5931  if (instr->hydrogen()->MustPrefillWithFiller()) {
5932    if (instr->size()->IsConstantOperand()) {
5933      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5934      __ mov(temp, (size / kPointerSize) - 1);
5935    } else {
5936      temp = ToRegister(instr->size());
5937      __ shr(temp, kPointerSizeLog2);
5938      __ dec(temp);
5939    }
5940    Label loop;
5941    __ bind(&loop);
5942    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
5943        isolate()->factory()->one_pointer_filler_map());
5944    __ dec(temp);
5945    __ j(not_zero, &loop);
5946  }
5947}
5948
5949
5950void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5951  Register result = ToRegister(instr->result());
5952
5953  // TODO(3095996): Get rid of this. For now, we need to make the
5954  // result register contain a valid pointer because it is already
5955  // contained in the register pointer map.
5956  __ Set(result, Immediate(Smi::FromInt(0)));
5957
5958  PushSafepointRegistersScope scope(this);
5959  if (instr->size()->IsRegister()) {
5960    Register size = ToRegister(instr->size());
5961    ASSERT(!size.is(result));
5962    __ SmiTag(ToRegister(instr->size()));
5963    __ push(size);
5964  } else {
5965    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5966    __ push(Immediate(Smi::FromInt(size)));
5967  }
5968
5969  int flags = AllocateDoubleAlignFlag::encode(
5970      instr->hydrogen()->MustAllocateDoubleAligned());
5971  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5972    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5973    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5974    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5975  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5976    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5977    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5978  } else {
5979    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5980  }
5981  __ push(Immediate(Smi::FromInt(flags)));
5982
5983  CallRuntimeFromDeferred(
5984      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5985  __ StoreToSafepointRegisterSlot(result, eax);
5986}
5987
5988
5989void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5990  ASSERT(ToRegister(instr->value()).is(eax));
5991  __ push(eax);
5992  CallRuntime(Runtime::kToFastProperties, 1, instr);
5993}
5994
5995
5996void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5997  ASSERT(ToRegister(instr->context()).is(esi));
5998  Label materialized;
5999  // Registers will be used as follows:
6000  // ecx = literals array.
6001  // ebx = regexp literal.
6002  // eax = regexp literal clone.
6003  // esi = context.
6004  int literal_offset =
6005      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
6006  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
6007  __ mov(ebx, FieldOperand(ecx, literal_offset));
6008  __ cmp(ebx, factory()->undefined_value());
6009  __ j(not_equal, &materialized, Label::kNear);
6010
6011  // Create regexp literal using runtime function
6012  // Result will be in eax.
6013  __ push(ecx);
6014  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
6015  __ push(Immediate(instr->hydrogen()->pattern()));
6016  __ push(Immediate(instr->hydrogen()->flags()));
6017  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
6018  __ mov(ebx, eax);
6019
6020  __ bind(&materialized);
6021  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
6022  Label allocated, runtime_allocate;
6023  __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
6024  __ jmp(&allocated, Label::kNear);
6025
6026  __ bind(&runtime_allocate);
6027  __ push(ebx);
6028  __ push(Immediate(Smi::FromInt(size)));
6029  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
6030  __ pop(ebx);
6031
6032  __ bind(&allocated);
6033  // Copy the content into the newly allocated memory.
6034  // (Unroll copy loop once for better throughput).
6035  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
6036    __ mov(edx, FieldOperand(ebx, i));
6037    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
6038    __ mov(FieldOperand(eax, i), edx);
6039    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
6040  }
6041  if ((size % (2 * kPointerSize)) != 0) {
6042    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
6043    __ mov(FieldOperand(eax, size - kPointerSize), edx);
6044  }
6045}
6046
6047
6048void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
6049  ASSERT(ToRegister(instr->context()).is(esi));
6050  // Use the fast case closure allocation code that allocates in new
6051  // space for nested functions that don't need literals cloning.
6052  bool pretenure = instr->hydrogen()->pretenure();
6053  if (!pretenure && instr->hydrogen()->has_no_literals()) {
6054    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
6055                            instr->hydrogen()->is_generator());
6056    __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
6057    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
6058  } else {
6059    __ push(esi);
6060    __ push(Immediate(instr->hydrogen()->shared_info()));
6061    __ push(Immediate(pretenure ? factory()->true_value()
6062                                : factory()->false_value()));
6063    CallRuntime(Runtime::kNewClosure, 3, instr);
6064  }
6065}
6066
6067
6068void LCodeGen::DoTypeof(LTypeof* instr) {
6069  ASSERT(ToRegister(instr->context()).is(esi));
6070  LOperand* input = instr->value();
6071  EmitPushTaggedOperand(input);
6072  CallRuntime(Runtime::kTypeof, 1, instr);
6073}
6074
6075
6076void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
6077  Register input = ToRegister(instr->value());
6078  Condition final_branch_condition = EmitTypeofIs(instr, input);
6079  if (final_branch_condition != no_condition) {
6080    EmitBranch(instr, final_branch_condition);
6081  }
6082}
6083
6084
6085Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
6086  Label* true_label = instr->TrueLabel(chunk_);
6087  Label* false_label = instr->FalseLabel(chunk_);
6088  Handle<String> type_name = instr->type_literal();
6089  int left_block = instr->TrueDestination(chunk_);
6090  int right_block = instr->FalseDestination(chunk_);
6091  int next_block = GetNextEmittedBlock();
6092
6093  Label::Distance true_distance = left_block == next_block ? Label::kNear
6094                                                           : Label::kFar;
6095  Label::Distance false_distance = right_block == next_block ? Label::kNear
6096                                                             : Label::kFar;
6097  Condition final_branch_condition = no_condition;
6098  if (type_name->Equals(heap()->number_string())) {
6099    __ JumpIfSmi(input, true_label, true_distance);
6100    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
6101           factory()->heap_number_map());
6102    final_branch_condition = equal;
6103
6104  } else if (type_name->Equals(heap()->string_string())) {
6105    __ JumpIfSmi(input, false_label, false_distance);
6106    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
6107    __ j(above_equal, false_label, false_distance);
6108    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
6109              1 << Map::kIsUndetectable);
6110    final_branch_condition = zero;
6111
6112  } else if (type_name->Equals(heap()->symbol_string())) {
6113    __ JumpIfSmi(input, false_label, false_distance);
6114    __ CmpObjectType(input, SYMBOL_TYPE, input);
6115    final_branch_condition = equal;
6116
6117  } else if (type_name->Equals(heap()->boolean_string())) {
6118    __ cmp(input, factory()->true_value());
6119    __ j(equal, true_label, true_distance);
6120    __ cmp(input, factory()->false_value());
6121    final_branch_condition = equal;
6122
6123  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
6124    __ cmp(input, factory()->null_value());
6125    final_branch_condition = equal;
6126
6127  } else if (type_name->Equals(heap()->undefined_string())) {
6128    __ cmp(input, factory()->undefined_value());
6129    __ j(equal, true_label, true_distance);
6130    __ JumpIfSmi(input, false_label, false_distance);
6131    // Check for undetectable objects => true.
6132    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
6133    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
6134              1 << Map::kIsUndetectable);
6135    final_branch_condition = not_zero;
6136
6137  } else if (type_name->Equals(heap()->function_string())) {
6138    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
6139    __ JumpIfSmi(input, false_label, false_distance);
6140    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
6141    __ j(equal, true_label, true_distance);
6142    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
6143    final_branch_condition = equal;
6144
6145  } else if (type_name->Equals(heap()->object_string())) {
6146    __ JumpIfSmi(input, false_label, false_distance);
6147    if (!FLAG_harmony_typeof) {
6148      __ cmp(input, factory()->null_value());
6149      __ j(equal, true_label, true_distance);
6150    }
6151    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
6152    __ j(below, false_label, false_distance);
6153    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
6154    __ j(above, false_label, false_distance);
6155    // Check for undetectable objects => false.
6156    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
6157              1 << Map::kIsUndetectable);
6158    final_branch_condition = zero;
6159
6160  } else {
6161    __ jmp(false_label, false_distance);
6162  }
6163  return final_branch_condition;
6164}
6165
6166
6167void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
6168  Register temp = ToRegister(instr->temp());
6169
6170  EmitIsConstructCall(temp);
6171  EmitBranch(instr, equal);
6172}
6173
6174
6175void LCodeGen::EmitIsConstructCall(Register temp) {
6176  // Get the frame pointer for the calling frame.
6177  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
6178
6179  // Skip the arguments adaptor frame if it exists.
6180  Label check_frame_marker;
6181  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
6182         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6183  __ j(not_equal, &check_frame_marker, Label::kNear);
6184  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
6185
6186  // Check the marker in the calling frame.
6187  __ bind(&check_frame_marker);
6188  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
6189         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
6190}
6191
6192
6193void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
6194  if (!info()->IsStub()) {
6195    // Ensure that we have enough space after the previous lazy-bailout
6196    // instruction for patching the code here.
6197    int current_pc = masm()->pc_offset();
6198    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
6199      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
6200      __ Nop(padding_size);
6201    }
6202  }
6203  last_lazy_deopt_pc_ = masm()->pc_offset();
6204}
6205
6206
6207void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
6208  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
6209  ASSERT(instr->HasEnvironment());
6210  LEnvironment* env = instr->environment();
6211  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6212  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6213}
6214
6215
6216void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
6217  Deoptimizer::BailoutType type = instr->hydrogen()->type();
6218  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
6219  // needed return address), even though the implementation of LAZY and EAGER is
6220  // now identical. When LAZY is eventually completely folded into EAGER, remove
6221  // the special case below.
6222  if (info()->IsStub() && type == Deoptimizer::EAGER) {
6223    type = Deoptimizer::LAZY;
6224  }
6225  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
6226  DeoptimizeIf(no_condition, instr->environment(), type);
6227}
6228
6229
6230void LCodeGen::DoDummy(LDummy* instr) {
6231  // Nothing to see here, move on!
6232}
6233
6234
6235void LCodeGen::DoDummyUse(LDummyUse* instr) {
6236  // Nothing to see here, move on!
6237}
6238
6239
6240void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
6241  PushSafepointRegistersScope scope(this);
6242  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
6243  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
6244  RecordSafepointWithLazyDeopt(
6245      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
6246  ASSERT(instr->HasEnvironment());
6247  LEnvironment* env = instr->environment();
6248  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6249}
6250
6251
6252void LCodeGen::DoStackCheck(LStackCheck* instr) {
6253  class DeferredStackCheck V8_FINAL : public LDeferredCode {
6254   public:
6255    DeferredStackCheck(LCodeGen* codegen,
6256                       LStackCheck* instr,
6257                       const X87Stack& x87_stack)
6258        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
6259    virtual void Generate() V8_OVERRIDE {
6260      codegen()->DoDeferredStackCheck(instr_);
6261    }
6262    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
6263   private:
6264    LStackCheck* instr_;
6265  };
6266
6267  ASSERT(instr->HasEnvironment());
6268  LEnvironment* env = instr->environment();
6269  // There is no LLazyBailout instruction for stack-checks. We have to
6270  // prepare for lazy deoptimization explicitly here.
6271  if (instr->hydrogen()->is_function_entry()) {
6272    // Perform stack overflow check.
6273    Label done;
6274    ExternalReference stack_limit =
6275        ExternalReference::address_of_stack_limit(isolate());
6276    __ cmp(esp, Operand::StaticVariable(stack_limit));
6277    __ j(above_equal, &done, Label::kNear);
6278
6279    ASSERT(instr->context()->IsRegister());
6280    ASSERT(ToRegister(instr->context()).is(esi));
6281    CallCode(isolate()->builtins()->StackCheck(),
6282             RelocInfo::CODE_TARGET,
6283             instr);
6284    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
6285    __ bind(&done);
6286    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6287    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
6288  } else {
6289    ASSERT(instr->hydrogen()->is_backwards_branch());
6290    // Perform stack overflow check if this goto needs it before jumping.
6291    DeferredStackCheck* deferred_stack_check =
6292        new(zone()) DeferredStackCheck(this, instr, x87_stack_);
6293    ExternalReference stack_limit =
6294        ExternalReference::address_of_stack_limit(isolate());
6295    __ cmp(esp, Operand::StaticVariable(stack_limit));
6296    __ j(below, deferred_stack_check->entry());
6297    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
6298    __ bind(instr->done_label());
6299    deferred_stack_check->SetExit(instr->done_label());
6300    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
6301    // Don't record a deoptimization index for the safepoint here.
6302    // This will be done explicitly when emitting call and the safepoint in
6303    // the deferred code.
6304  }
6305}
6306
6307
6308void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
6309  // This is a pseudo-instruction that ensures that the environment here is
6310  // properly registered for deoptimization and records the assembler's PC
6311  // offset.
6312  LEnvironment* environment = instr->environment();
6313
6314  // If the environment were already registered, we would have no way of
6315  // backpatching it with the spill slot operands.
6316  ASSERT(!environment->HasBeenRegistered());
6317  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
6318
6319  GenerateOsrPrologue();
6320}
6321
6322
6323void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
6324  ASSERT(ToRegister(instr->context()).is(esi));
6325  __ cmp(eax, isolate()->factory()->undefined_value());
6326  DeoptimizeIf(equal, instr->environment());
6327
6328  __ cmp(eax, isolate()->factory()->null_value());
6329  DeoptimizeIf(equal, instr->environment());
6330
6331  __ test(eax, Immediate(kSmiTagMask));
6332  DeoptimizeIf(zero, instr->environment());
6333
6334  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
6335  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
6336  DeoptimizeIf(below_equal, instr->environment());
6337
6338  Label use_cache, call_runtime;
6339  __ CheckEnumCache(&call_runtime);
6340
6341  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
6342  __ jmp(&use_cache, Label::kNear);
6343
6344  // Get the set of properties to enumerate.
6345  __ bind(&call_runtime);
6346  __ push(eax);
6347  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
6348
6349  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
6350         isolate()->factory()->meta_map());
6351  DeoptimizeIf(not_equal, instr->environment());
6352  __ bind(&use_cache);
6353}
6354
6355
6356void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
6357  Register map = ToRegister(instr->map());
6358  Register result = ToRegister(instr->result());
6359  Label load_cache, done;
6360  __ EnumLength(result, map);
6361  __ cmp(result, Immediate(Smi::FromInt(0)));
6362  __ j(not_equal, &load_cache, Label::kNear);
6363  __ mov(result, isolate()->factory()->empty_fixed_array());
6364  __ jmp(&done, Label::kNear);
6365
6366  __ bind(&load_cache);
6367  __ LoadInstanceDescriptors(map, result);
6368  __ mov(result,
6369         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
6370  __ mov(result,
6371         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
6372  __ bind(&done);
6373  __ test(result, result);
6374  DeoptimizeIf(equal, instr->environment());
6375}
6376
6377
6378void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
6379  Register object = ToRegister(instr->value());
6380  __ cmp(ToRegister(instr->map()),
6381         FieldOperand(object, HeapObject::kMapOffset));
6382  DeoptimizeIf(not_equal, instr->environment());
6383}
6384
6385
6386void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
6387  Register object = ToRegister(instr->object());
6388  Register index = ToRegister(instr->index());
6389
6390  Label out_of_object, done;
6391  __ cmp(index, Immediate(0));
6392  __ j(less, &out_of_object, Label::kNear);
6393  __ mov(object, FieldOperand(object,
6394                              index,
6395                              times_half_pointer_size,
6396                              JSObject::kHeaderSize));
6397  __ jmp(&done, Label::kNear);
6398
6399  __ bind(&out_of_object);
6400  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
6401  __ neg(index);
6402  // Index is now equal to out of object property index plus 1.
6403  __ mov(object, FieldOperand(object,
6404                              index,
6405                              times_half_pointer_size,
6406                              FixedArray::kHeaderSize - kPointerSize));
6407  __ bind(&done);
6408}
6409
6410
6411#undef __
6412
6413} }  // namespace v8::internal
6414
6415#endif  // V8_TARGET_ARCH_IA32
6416