// lithium-codegen-ia32.cc revision b8e0da25ee8efac3bb05cd6b2730aafbd96119f4
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


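// Generates safepoint records for a call site: invoked after the call
// instruction has been emitted, it records the given pointer map under the
// given deoptimization index.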
class SafepointGenerator : public PostCallGenerator {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     int deoptimization_index)
      : codegen_(codegen),
        pointers_(pointers),
        deoptimization_index_(deoptimization_index) { }
  virtual ~SafepointGenerator() { }

  virtual void Generate() {
    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  int deoptimization_index_;
};


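// Every "__" below expands to "masm()->", so the generator functions read
// like assembly listings.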
#define __ masm()->

bool LCodeGen::GenerateCode() {
  HPhase phase("Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  CpuFeatures::Scope scope(SSE2);
  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(StackSlotCount());
  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
    PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack-allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  __ push(ebp);  // Caller's frame pointer.
  __ mov(ebp, esp);
  __ push(esi);  // Callee's context.
  __ push(edi);  // Callee's JS function.
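  // The fixed frame is now complete:
  //   ebp + 4 : return address
  //   ebp + 0 : caller's frame pointer
  //   ebp - 4 : context
  //   ebp - 8 : JS function
  // Spill slots, if any, are allocated below this.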

  // Reserve space for the stack slots needed by the code.
  int slots = StackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ push(Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
    } else {
      __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      // On Windows, you may not access the stack more than one page below
      // the most recently mapped page. To make the allocated area randomly
      // accessible, we write to each page in turn (the value is irrelevant).
      const int kPageSize = 4 * KB;
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        __ mov(Operand(esp, offset), eax);
      }
#endif
    }
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}


LInstruction* LCodeGen::GetNextInstruction() {
  if (current_instruction_ < instructions_->length() - 1) {
    return instructions_->at(current_instruction_ + 1);
  } else {
    return NULL;
  }
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
    LDeferredCode* code = deferred_[i];
    __ bind(code->entry());
    code->Generate();
    __ jmp(code->exit());
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), StackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}


Immediate LCodeGen::ToImmediate(LOperand* op) {
  LConstantOperand* const_op = LConstantOperand::cast(op);
  Handle<Object> literal = chunk_->LookupLiteral(const_op);
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
  if (r.IsInteger32()) {
    ASSERT(literal->IsNumber());
    return Immediate(static_cast<int32_t>(literal->Number()));
  } else if (r.IsDouble()) {
    Abort("unsupported double immediate");
  }
  ASSERT(r.IsTagged());
  return Immediate(literal);
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return Operand(ebp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return Operand(ebp, -(index - 1) * kPointerSize);
  }
}
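// For example, with kPointerSize == 4, spill slot 0 maps to Operand(ebp, -12),
// just below the fixed frame, and incoming operand index -1 maps to
// Operand(ebp, +8), just above the return address.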


Operand LCodeGen::HighOperand(LOperand* op) {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  int offset = (index >= 0) ? index + 3 : index - 1;
  return Operand(ebp, -offset * kPointerSize);
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
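  // Outer (enclosing) environments are written first, so the translation
  // lists frames from outermost to innermost.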
  int closure_id = DefineDeoptimizationLiteral(environment->closure());
  translation->BeginFrame(environment->ast_id(), closure_id, height);
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}


void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed by the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = StackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  if (instr != NULL) {
    LPointerMap* pointers = instr->pointer_map();
    RecordPosition(pointers->position());
    __ call(code, mode);
    RegisterLazyDeoptimization(instr);
  } else {
    LPointerMap no_pointers(0);
    RecordPosition(no_pointers.position());
    __ call(code, mode);
    RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
  }

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  ASSERT(pointers != NULL);
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  // Runtime calls to Throw are not supposed to ever return at the
  // call site, so don't register lazy deoptimization for these. We do
  // however have to record a safepoint since throwing exceptions can
  // cause garbage collections.
  // BUG(3243555): register a lazy deoptimization point at throw. We need
  // it to be able to inline functions containing a throw statement.
  if (!instr->IsThrow()) {
    RegisterLazyDeoptimization(instr);
  } else {
    RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
  }
}


void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
  // Create the environment to bail out to. If the call has side effects,
  // execution has to continue after the call; otherwise execution can resume
  // from a previous bailout point, repeating the call.
  LEnvironment* deoptimization_environment;
  if (instr->HasDeoptimizationEnvironment()) {
    deoptimization_environment = instr->deoptimization_environment();
  } else {
    deoptimization_environment = instr->environment();
  }

  RegisterEnvironmentForDeoptimization(deoptimization_environment);
  RecordSafepoint(instr->pointer_map(),
                  deoptimization_environment->deoptimization_index());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
    }
    Translation translation(&translations_, frame_count);
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    environment->Register(deoptimization_index, translation.index());
    deoptimizations_.Add(environment);
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  ASSERT(entry != NULL);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

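  // Stress mode: decrement a per-function counter in the SharedFunctionInfo
  // and force a deoptimization every time the counter reaches zero.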
  if (FLAG_deopt_every_n_times != 0) {
    Handle<SharedFunctionInfo> shared(info_->shared_info());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ push(ebx);
    __ mov(ebx, shared);
    __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
    __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
    __ j(not_zero, &no_deopt);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);

    __ bind(&no_deopt);
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
  }

  if (cc == no_condition) {
    if (FLAG_trap_on_deopt) __ int3();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    if (FLAG_trap_on_deopt) {
      NearLabel done;
      __ j(NegateCondition(cc), &done);
      __ int3();
      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
      __ bind(&done);
    } else {
      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
    }
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  ASSERT(FLAG_deopt);
  Handle<DeoptimizationInputData> data =
      Factory::NewDeoptimizationInputData(length, TENURED);

  data->SetTranslationByteArray(*translations_.CreateByteArray());
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal);
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               int deoptimization_index) {
  const ZoneList<LOperand*>* operands = pointers->operands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
                                                    deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    }
  }
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            int deoptimization_index) {
  const ZoneList<LOperand*>* operands = pointers->operands();
  Safepoint safepoint =
      safepoints_.DefineSafepointWithRegisters(
          masm(), arguments, deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister()) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
  // Register esi always contains a pointer to the context.
  safepoint.DefinePointerRegister(esi);
}


void LCodeGen::RecordPosition(int position) {
  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  LCodeGen::DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }

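  // A lazy bailout after this gap expects the safepoint to refer to the pc
  // following the gap moves, so patch the last recorded safepoint.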
  LInstruction* next = GetNextInstruction();
  if (next != NULL && next->IsLazyBailout()) {
    int pc = masm()->pc_offset();
    safepoints_.SetPcAfterGap(pc);
  }
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCharAt: {
      StringCharAtStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::MathPow: {
      MathPowStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}


void LCodeGen::DoModI(LModI* instr) {
  LOperand* right = instr->InputAt(1);
  ASSERT(ToRegister(instr->result()).is(edx));
  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(edx));

  Register right_reg = ToRegister(right);

  // Check for x % 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Sign extend to edx.
  __ cdq();

  // Check for (0 % -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    NearLabel positive_left;
    NearLabel done;
    __ test(eax, Operand(eax));
    __ j(not_sign, &positive_left);
    __ idiv(right_reg);

    // Test the remainder for 0, because then the result would be -0.
    __ test(edx, Operand(edx));
    __ j(not_zero, &done);

    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&positive_left);
    __ idiv(right_reg);
    __ bind(&done);
  } else {
    __ idiv(right_reg);
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  LOperand* right = instr->InputAt(1);
  ASSERT(ToRegister(instr->result()).is(eax));
  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(edx));

  Register left_reg = eax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    NearLabel left_not_zero;
    __ test(left_reg, Operand(left_reg));
    __ j(not_zero, &left_not_zero);
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    NearLabel left_not_min_int;
    __ cmp(left_reg, kMinInt);
    __ j(not_zero, &left_not_min_int);
    __ cmp(right_reg, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to edx.
  __ cdq();
  __ idiv(right_reg);

  // Deoptimize if remainder is not 0.
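  // A non-zero remainder means the quotient is not an exact int32 result,
  // which this instruction cannot represent.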
  __ test(edx, Operand(edx));
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->InputAt(0));
  LOperand* right = instr->InputAt(1);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
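    // Keep a copy of the left operand: imul overwrites it, but the
    // minus-zero check below still needs the original value's sign.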
    __ mov(ToRegister(instr->TempAt(0)), left);
  }

  if (right->IsConstantOperand()) {
    __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
  } else {
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    NearLabel done;
    __ test(left, Operand(left));
    __ j(not_zero, &done);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->TempAt(0)), ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), right_operand);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
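        // JavaScript's >>> yields an unsigned result; if the top bit ends up
        // set, the value does not fit in a signed 32-bit integer, hence the
        // deoptimization check when deopt support is required.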
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        } else {
          __ shr(ToRegister(left), shift_count);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ shl(ToRegister(left), shift_count);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left), ToImmediate(right));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
  // so if the constant is -0.0.
  if (BitCast<uint64_t, double>(v) == 0) {
    __ xorpd(res, res);
  } else {
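    // If the double round-trips through int32 exactly, a single 32-bit push
    // plus cvtsi2sd is shorter than materializing the 64-bit bit pattern.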
    int32_t v_int32 = static_cast<int32_t>(v);
    if (static_cast<double>(v_int32) == v) {
      __ push_imm32(v_int32);
      __ cvtsi2sd(res, Operand(esp, 0));
      __ add(Operand(esp), Immediate(kPointerSize));
    } else {
      uint64_t int_val = BitCast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
      __ push_imm32(upper);
      __ push_imm32(lower);
      __ movdbl(res, Operand(esp, 0));
      __ add(Operand(esp), Immediate(2 * kPointerSize));
    }
  }
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->TempAt(0));
  ASSERT(input.is(result));
  NearLabel done;
  // If the object is a smi return the object.
  __ test(input, Immediate(kSmiTagMask));
  __ j(zero, &done);

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, map);
  __ j(not_equal, &done);
  __ mov(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->Equals(instr->result()));
  __ not_(ToRegister(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToOperand(instr->InputAt(0)));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ add(ToOperand(left), ToImmediate(right));
  } else {
    __ add(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  // Modulo uses a fixed result register.
  ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::SUB:
      __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::MUL:
      __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::DIV:
      __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::MOD: {
      // Pass two doubles as arguments on the stack.
      __ PrepareCallCFunction(4, eax);
      __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
      __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
      __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);

      // Return value is in st(0) on ia32.
      // Store it into the (fixed) result register.
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ movdbl(ToDoubleRegister(instr->result()), Operand(esp, 0));
      __ add(Operand(esp), Immediate(kDoubleSize));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(edx));
  ASSERT(ToRegister(instr->InputAt(1)).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}


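// Emits a conditional branch to the two destination blocks, omitting the jump
// to whichever block immediately follows in the emission order so that
// control simply falls through.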
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}


void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->InputAt(0));
    __ test(reg, Operand(reg));
    EmitBranch(true_block, false_block, not_zero);
  } else if (r.IsDouble()) {
    XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
    __ xorpd(xmm0, xmm0);
    __ ucomisd(reg, xmm0);
    EmitBranch(true_block, false_block, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->InputAt(0));
    if (instr->hydrogen()->type().IsBoolean()) {
      __ cmp(reg, Factory::true_value());
      EmitBranch(true_block, false_block, equal);
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      __ cmp(reg, Factory::undefined_value());
      __ j(equal, false_label);
      __ cmp(reg, Factory::true_value());
      __ j(equal, true_label);
      __ cmp(reg, Factory::false_value());
      __ j(equal, false_label);
      __ test(reg, Operand(reg));
      __ j(equal, false_label);
      __ test(reg, Immediate(kSmiTagMask));
      __ j(zero, true_label);

      // Test for double values. Zero is false.
      NearLabel call_stub;
      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
             Factory::heap_number_map());
      __ j(not_equal, &call_stub);
      __ fldz();
      __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
      __ FCmp();
      __ j(zero, false_label);
      __ jmp(true_label);

      // The conversion stub doesn't cause garbage collections so it's
      // safe to not record a safepoint after the call.
      __ bind(&call_stub);
      ToBooleanStub stub;
      __ pushad();
      __ push(reg);
      __ CallStub(&stub);
      __ test(eax, Operand(eax));
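      // popad restores the registers but does not modify EFLAGS, so the
      // flags set by the test above survive for the branch below.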
      __ popad();
      EmitBranch(true_block, false_block, not_zero);
    }
  }
}


void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    // Perform stack overflow check if this goto needs it before jumping.
    if (deferred_stack_check != NULL) {
      ExternalReference stack_limit =
          ExternalReference::address_of_stack_limit();
      __ cmp(esp, Operand::StaticVariable(stack_limit));
      __ j(above_equal, chunk_->GetAssemblyLabel(block));
      __ jmp(deferred_stack_check->entry());
      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
    } else {
      __ jmp(chunk_->GetAssemblyLabel(block));
    }
  }
}


void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
  __ pushad();
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
  __ popad();
}


void LCodeGen::DoGoto(LGoto* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
   private:
    LGoto* instr_;
  };

  DeferredStackCheck* deferred = NULL;
  if (instr->include_stack_check()) {
    deferred = new DeferredStackCheck(this, instr);
  }
  EmitGoto(instr->block_id(), deferred);
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
  if (right->IsConstantOperand()) {
    __ cmp(ToOperand(left), ToImmediate(right));
  } else {
    __ cmp(ToRegister(left), ToOperand(right));
  }
}


void LCodeGen::DoCmpID(LCmpID* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();

  NearLabel unordered;
  if (instr->is_double()) {
    // Don't base result on EFLAGS when a NaN is involved. Instead
    // jump to the unordered case, which produces a false value.
    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
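    // ucomisd signals an unordered (NaN) comparison via the parity flag.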
    __ j(parity_even, &unordered, not_taken);
  } else {
    EmitCmpI(left, right);
  }

  NearLabel done;
  Condition cc = TokenToCondition(instr->op(), instr->is_double());
  __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
  __ j(cc, &done);

  __ bind(&unordered);
  __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  if (instr->is_double()) {
    // Don't base result on EFLAGS when a NaN is involved. Instead
    // jump to the false block.
    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
  } else {
    EmitCmpI(left, right);
  }

  Condition cc = TokenToCondition(instr->op(), instr->is_double());
  EmitBranch(true_block, false_block, cc);
}


void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  Register result = ToRegister(instr->result());

  __ cmp(left, Operand(right));
  __ mov(result, Handle<Object>(Heap::true_value()));
  NearLabel done;
  __ j(equal, &done);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  __ cmp(left, Operand(right));
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoIsNull(LIsNull* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  // TODO(fsc): If the expression is known to be a smi, then it's
  // definitely not null. Materialize false.

  __ cmp(reg, Factory::null_value());
  if (instr->is_strict()) {
    __ mov(result, Handle<Object>(Heap::true_value()));
    NearLabel done;
    __ j(equal, &done);
    __ mov(result, Handle<Object>(Heap::false_value()));
    __ bind(&done);
  } else {
    NearLabel true_value, false_value, done;
    __ j(equal, &true_value);
    __ cmp(reg, Factory::undefined_value());
    __ j(equal, &true_value);
    __ test(reg, Immediate(kSmiTagMask));
    __ j(zero, &false_value);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = result;
    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
    __ j(not_zero, &true_value);
    __ bind(&false_value);
    __ mov(result, Handle<Object>(Heap::false_value()));
    __ jmp(&done);
    __ bind(&true_value);
    __ mov(result, Handle<Object>(Heap::true_value()));
    __ bind(&done);
  }
}


void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));

  // TODO(fsc): If the expression is known to be a smi, then it's
  // definitely not null. Jump to the false block.

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ cmp(reg, Factory::null_value());
  if (instr->is_strict()) {
    EmitBranch(true_block, false_block, equal);
  } else {
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ j(equal, true_label);
    __ cmp(reg, Factory::undefined_value());
    __ j(equal, true_label);
    __ test(reg, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = ToRegister(instr->TempAt(0));
    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
    EmitBranch(true_block, false_block, not_zero);
  }
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  ASSERT(!input.is(temp1));
  ASSERT(!input.is(temp2));
  ASSERT(!temp1.is(temp2));

  __ test(input, Immediate(kSmiTagMask));
  __ j(equal, is_not_object);

  __ cmp(input, Factory::null_value());
  __ j(equal, is_object);

  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
  __ test(temp2, Immediate(1 << Map::kIsUndetectable));
  __ j(not_zero, is_not_object);

  __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
  __ j(below, is_not_object);
  __ cmp(temp2, LAST_JS_OBJECT_TYPE);
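  // The caller branches on the returned condition, which holds exactly when
  // the instance type is within [FIRST_JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE].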
  return below_equal;
}


void LCodeGen::DoIsObject(LIsObject* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->TempAt(0));
  Label is_false, is_true, done;

  Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
  __ j(true_cond, &is_true);

  __ bind(&is_false);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ jmp(&done);

  __ bind(&is_true);
  __ mov(result, Handle<Object>(Heap::true_value()));

  __ bind(&done);
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond);
}


void LCodeGen::DoIsSmi(LIsSmi* instr) {
  Operand input = ToOperand(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ test(input, Immediate(kSmiTagMask));
  __ mov(result, Handle<Object>(Heap::true_value()));
  NearLabel done;
  __ j(zero, &done);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(true_block, false_block, zero);
}

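// HHasInstanceType tests whether an instance type lies in the interval
// [from, to]. The interval is either a single type or extends to the first or
// last type, so a single compare against TestType() with the condition from
// BranchCondition() suffices.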
static InstanceType TestType(HHasInstanceType* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceType* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ test(input, Immediate(kSmiTagMask));
  NearLabel done, is_false;
  __ j(zero, &is_false);
  __ CmpObjectType(input, TestType(instr->hydrogen()), result);
  __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
  __ mov(result, Handle<Object>(Heap::true_value()));
  __ jmp(&done);
  __ bind(&is_false);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ test(input, Immediate(kSmiTagMask));
  __ j(zero, false_label);

  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ mov(result, Handle<Object>(Heap::true_value()));
  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  NearLabel done;
  __ j(not_zero, &done);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, not_equal);
}


// Branches to a label or falls through with the answer in the z flag.  Trashes
// the temp registers, but not the input.  Only input and temp2 may alias.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
  __ test(input, Immediate(kSmiTagMask));
  __ j(zero, is_false);
  __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
  __ j(below, is_false);

  // Map is now in temp.
  // Functions have class 'Function'.
  __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
  if (class_name->IsEqualTo(CStrVector("Function"))) {
    __ j(equal, is_true);
  } else {
    __ j(equal, is_false);
  }

  // Check if the constructor in the map is a function.
  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));

  // As long as JS_FUNCTION_TYPE is the last instance type and it is
  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
  // LAST_JS_OBJECT_TYPE.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);

  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp, FieldOperand(temp,
                            SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is a symbol because it's a literal.
  // The name in the constructor is a symbol because of the way the context is
  // booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are symbols it is sufficient to use an identity
  // comparison.
  __ cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  ASSERT(input.is(result));
  Register temp = ToRegister(instr->TempAt(0));
  Handle<String> class_name = instr->hydrogen()->class_name();
  NearLabel done;
  Label is_true, is_false;

  EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);

  __ j(not_equal, &is_false);

  __ bind(&is_true);
  __ mov(result, Handle<Object>(Heap::true_value()));
  __ jmp(&done);

  __ bind(&is_false);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));
  if (input.is(temp)) {
    // Swap.
    Register swapper = temp;
    temp = temp2;
    temp2 = swapper;
  }
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  // Object and function are in fixed registers defined by the stub.
  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  NearLabel true_value, done;
  __ test(eax, Operand(eax));
  __ j(zero, &true_value);
  __ mov(ToRegister(instr->result()), Factory::false_value());
  __ jmp(&done);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), Factory::true_value());
  __ bind(&done);
}


void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ test(eax, Operand(eax));
  EmitBranch(true_block, false_block, zero);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
1731        : LDeferredCode(codegen), instr_(instr) { }
1732    virtual void Generate() {
1733      codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
1734    }
1735
1736    Label* map_check() { return &map_check_; }
1737
1738   private:
1739    LInstanceOfKnownGlobal* instr_;
1740    Label map_check_;
1741  };
1742
1743  DeferredInstanceOfKnownGlobal* deferred =
1744      new DeferredInstanceOfKnownGlobal(this, instr);
1745
1746  Label done, false_result;
1747  Register object = ToRegister(instr->InputAt(0));
1748  Register temp = ToRegister(instr->TempAt(0));
1749
1750  // A Smi is not instance of anything.
1751  __ test(object, Immediate(kSmiTagMask));
1752  __ j(zero, &false_result, not_taken);
1753
1754  // This is the inlined call site instanceof cache. The two occurrences of the
1755  // hole value will be patched to the last map/result pair generated by the
1756  // instanceof stub.
1757  NearLabel cache_miss;
1758  Register map = ToRegister(instr->TempAt(0));
1759  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
1760  __ bind(deferred->map_check());  // Label for calculating code patching.
1761  __ cmp(map, Factory::the_hole_value());  // Patched to cached map.
1762  __ j(not_equal, &cache_miss, not_taken);
1763  __ mov(eax, Factory::the_hole_value());  // Patched to either true or false.
1764  __ jmp(&done);
1765
1766  // The inlined call site cache did not match. Check null and string before
1767  // calling the deferred code.
1768  __ bind(&cache_miss);
1769  // Null is not instance of anything.
1770  __ cmp(object, Factory::null_value());
1771  __ j(equal, &false_result);
1772
1773  // String values are not instances of anything.
1774  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
1775  __ j(is_string, &false_result);
1776
1777  // Go to the deferred code.
1778  __ jmp(deferred->entry());
1779
1780  __ bind(&false_result);
1781  __ mov(ToRegister(instr->result()), Factory::false_value());
1782
1783  // Here result has either true or false. Deferred code also produces true or
1784  // false object.
1785  __ bind(deferred->exit());
1786  __ bind(&done);
1787}
1788
1789
1790void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
1791                                                Label* map_check) {
1792  __ PushSafepointRegisters();
1793
1794  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
1795  flags = static_cast<InstanceofStub::Flags>(
1796      flags | InstanceofStub::kArgsInRegisters);
1797  flags = static_cast<InstanceofStub::Flags>(
1798      flags | InstanceofStub::kCallSiteInlineCheck);
1799  flags = static_cast<InstanceofStub::Flags>(
1800      flags | InstanceofStub::kReturnTrueFalseObject);
1801  InstanceofStub stub(flags);
1802
1803  // Get the temp register reserved by the instruction. This needs to be edi
1804  // because its slot in the pushed safepoint register block is used to
1805  // communicate the offset to the location of the map check.
1806  Register temp = ToRegister(instr->TempAt(0));
1807  ASSERT(temp.is(edi));
1808  __ mov(InstanceofStub::right(), Immediate(instr->function()));
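  // kAdditionalDelta is the size of the two movs and the call below, so the
  // stored delta spans from the map check to the return address of the stub
  // call; the ASSERT_EQ after the call keeps the constant in sync.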
1809  static const int kAdditionalDelta = 13;
1810  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
1811  Label before_push_delta;
1812  __ bind(&before_push_delta);
1813  __ mov(temp, Immediate(delta));
1814  __ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp);
1815  __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
1816  ASSERT_EQ(kAdditionalDelta,
1817            masm_->SizeOfCodeGeneratedSince(&before_push_delta));
1818  RecordSafepointWithRegisters(
1819      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
1820  // Put the result value into the eax slot and restore all registers.
1821  __ mov(Operand(esp, EspIndexForPushAll(eax) * kPointerSize), eax);
1822
1823  __ PopSafepointRegisters();
1824}
1825
1826
1827static Condition ComputeCompareCondition(Token::Value op) {
1828  switch (op) {
1829    case Token::EQ_STRICT:
1830    case Token::EQ:
1831      return equal;
1832    case Token::LT:
1833      return less;
1834    case Token::GT:
1835      return greater;
1836    case Token::LTE:
1837      return less_equal;
1838    case Token::GTE:
1839      return greater_equal;
1840    default:
1841      UNREACHABLE();
1842      return no_condition;
1843  }
1844}
1845
1846
1847void LCodeGen::DoCmpT(LCmpT* instr) {
1848  Token::Value op = instr->op();
1849
1850  Handle<Code> ic = CompareIC::GetUninitialized(op);
1851  CallCode(ic, RelocInfo::CODE_TARGET, instr);
1852
1853  Condition condition = ComputeCompareCondition(op);
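  // The compare stub expects the condition and the input operands reversed
  // for GT and LTE (see also DoCmpTAndBranch).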
1854  if (op == Token::GT || op == Token::LTE) {
1855    condition = ReverseCondition(condition);
1856  }
1857  NearLabel true_value, done;
1858  __ test(eax, Operand(eax));
1859  __ j(condition, &true_value);
1860  __ mov(ToRegister(instr->result()), Factory::false_value());
1861  __ jmp(&done);
1862  __ bind(&true_value);
1863  __ mov(ToRegister(instr->result()), Factory::true_value());
1864  __ bind(&done);
1865}
1866
1867
1868void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
1869  Token::Value op = instr->op();
1870  int true_block = chunk_->LookupDestination(instr->true_block_id());
1871  int false_block = chunk_->LookupDestination(instr->false_block_id());
1872
1873  Handle<Code> ic = CompareIC::GetUninitialized(op);
1874  CallCode(ic, RelocInfo::CODE_TARGET, instr);
1875
1876  // The compare stub expects compare condition and the input operands
1877  // reversed for GT and LTE.
1878  Condition condition = ComputeCompareCondition(op);
1879  if (op == Token::GT || op == Token::LTE) {
1880    condition = ReverseCondition(condition);
1881  }
1882  __ test(eax, Operand(eax));
1883  EmitBranch(true_block, false_block, condition);
1884}
1885
1886
1887void LCodeGen::DoReturn(LReturn* instr) {
1888  if (FLAG_trace) {
1889    // Preserve the return value on the stack and rely on the runtime
1890    // call to return the value in the same register.
1891    __ push(eax);
1892    __ CallRuntime(Runtime::kTraceExit, 1);
1893  }
1894  __ mov(esp, ebp);
1895  __ pop(ebp);
1896  __ ret((ParameterCount() + 1) * kPointerSize);
1897}
1898
1899
1900void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
1901  Register result = ToRegister(instr->result());
1902  __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
1903  if (instr->hydrogen()->check_hole_value()) {
1904    __ cmp(result, Factory::the_hole_value());
1905    DeoptimizeIf(equal, instr->environment());
1906  }
1907}
1908
1909
1910void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
1911  Register value = ToRegister(instr->InputAt(0));
1912  __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
1913}
1914
1915
1916void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
1917  // TODO(antonm): load a context with a separate instruction.
1918  Register result = ToRegister(instr->result());
1919  __ LoadContext(result, instr->context_chain_length());
1920  __ mov(result, ContextOperand(result, instr->slot_index()));
1921}
1922
1923
1924void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
1925  Register object = ToRegister(instr->InputAt(0));
1926  Register result = ToRegister(instr->result());
1927  if (instr->hydrogen()->is_in_object()) {
1928    __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
1929  } else {
1930    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
1931    __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
1932  }
1933}
1934
1935
1936void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
1937  ASSERT(ToRegister(instr->object()).is(eax));
1938  ASSERT(ToRegister(instr->result()).is(eax));
1939
1940  __ mov(ecx, instr->name());
1941  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
1942  CallCode(ic, RelocInfo::CODE_TARGET, instr);
1943}
1944
1945
1946void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
1947  Register function = ToRegister(instr->function());
1948  Register temp = ToRegister(instr->TempAt(0));
1949  Register result = ToRegister(instr->result());
1950
1951  // Check that the function really is a function.
1952  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
1953  DeoptimizeIf(not_equal, instr->environment());
1954
1955  // Check whether the function has an instance prototype.
1956  NearLabel non_instance;
1957  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
1958            1 << Map::kHasNonInstancePrototype);
1959  __ j(not_zero, &non_instance);
1960
1961  // Get the prototype or initial map from the function.
1962  __ mov(result,
1963         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1964
1965  // Check that the function has a prototype or an initial map.
1966  __ cmp(Operand(result), Immediate(Factory::the_hole_value()));
1967  DeoptimizeIf(equal, instr->environment());
1968
1969  // If the function does not have an initial map, we're done.
1970  NearLabel done;
1971  __ CmpObjectType(result, MAP_TYPE, temp);
1972  __ j(not_equal, &done);
1973
1974  // Get the prototype from the initial map.
1975  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
1976  __ jmp(&done);
1977
1978  // Non-instance prototype: Fetch prototype from constructor field
1979  // in the function's map.
1980  __ bind(&non_instance);
1981  __ mov(result, FieldOperand(result, Map::kConstructorOffset));
1982
1983  // All done.
1984  __ bind(&done);
1985}
1986
1987
1988void LCodeGen::DoLoadElements(LLoadElements* instr) {
1989  ASSERT(instr->result()->Equals(instr->InputAt(0)));
1990  Register reg = ToRegister(instr->InputAt(0));
1991  __ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
1992  if (FLAG_debug_code) {
1993    NearLabel done;
1994    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1995           Immediate(Factory::fixed_array_map()));
1996    __ j(equal, &done);
1997    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1998           Immediate(Factory::fixed_cow_array_map()));
1999    __ Check(equal, "Check for fast elements failed.");
2000    __ bind(&done);
2001  }
2002}
2003
2004
2005void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2006  Register arguments = ToRegister(instr->arguments());
2007  Register length = ToRegister(instr->length());
2008  Operand index = ToOperand(instr->index());
2009  Register result = ToRegister(instr->result());
2010
2011  __ sub(length, index);
2012  DeoptimizeIf(below_equal, instr->environment());
2013
2014  // There are two words between the frame pointer and the last argument.
2015  // Subtracting from length accounts for one of them; add one more.
2016  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
2017}
2018
2019
2020void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2021  Register elements = ToRegister(instr->elements());
2022  Register key = ToRegister(instr->key());
2023  Register result = ToRegister(instr->result());
2024  ASSERT(result.is(elements));
2025
2026  // Load the result.
2027  __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
2028
2029  // Check for the hole value.
2030  __ cmp(result, Factory::the_hole_value());
2031  DeoptimizeIf(equal, instr->environment());
2032}
2033
2034
2035void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2036  ASSERT(ToRegister(instr->object()).is(edx));
2037  ASSERT(ToRegister(instr->key()).is(eax));
2038
2039  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
2040  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2041}
2042
2043
2044void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2045  Register result = ToRegister(instr->result());
2046
2047  // Check for arguments adapter frame.
2048  NearLabel done, adapted;
2049  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2050  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
2051  __ cmp(Operand(result),
2052         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2053  __ j(equal, &adapted);
2054
2055  // No arguments adaptor frame.
2056  __ mov(result, Operand(ebp));
2057  __ jmp(&done);
2058
2059  // Arguments adaptor frame present.
2060  __ bind(&adapted);
2061  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2062
2063  // Result is the frame pointer for the frame if not adapted and for the real
2064  // frame below the adaptor frame if adapted.
2065  __ bind(&done);
2066}
2067
2068
2069void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2070  Operand elem = ToOperand(instr->InputAt(0));
2071  Register result = ToRegister(instr->result());
2072
2073  NearLabel done;
2074
2075  // If there is no arguments adaptor frame, the number of arguments is fixed.
2076  __ cmp(ebp, elem);
2077  __ mov(result, Immediate(scope()->num_parameters()));
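  // mov does not modify the flags, so it is safe between the cmp above and
  // the conditional branch below.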
2078  __ j(equal, &done);
2079
2080  // Arguments adaptor frame present. Get argument length from there.
2081  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2082  __ mov(result, Operand(result,
2083                         ArgumentsAdaptorFrameConstants::kLengthOffset));
2084  __ SmiUntag(result);
2085
2086  // Argument length is in result register.
2087  __ bind(&done);
2088}
2089
2090
2091void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2092  Register receiver = ToRegister(instr->receiver());
2093  ASSERT(ToRegister(instr->function()).is(edi));
2094  ASSERT(ToRegister(instr->result()).is(eax));
2095
2096  // If the receiver is null or undefined, we have to pass the
2097  // global object as a receiver.
2098  NearLabel global_receiver, receiver_ok;
2099  __ cmp(receiver, Factory::null_value());
2100  __ j(equal, &global_receiver);
2101  __ cmp(receiver, Factory::undefined_value());
2102  __ j(not_equal, &receiver_ok);
2103  __ bind(&global_receiver);
2104  __ mov(receiver, GlobalObjectOperand());
2105  __ bind(&receiver_ok);
2106
2107  Register length = ToRegister(instr->length());
2108  Register elements = ToRegister(instr->elements());
2109
2110  Label invoke;
2111
2112  // Copy the arguments to this function possibly from the
2113  // adaptor frame below it.
2114  const uint32_t kArgumentsLimit = 1 * KB;
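  // More than 1024 arguments deoptimizes, bounding the stack space consumed
  // by the pushing loop below.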
2115  __ cmp(length, kArgumentsLimit);
2116  DeoptimizeIf(above, instr->environment());
2117
2118  __ push(receiver);
2119  __ mov(receiver, length);
2120
2121  // Loop through the arguments pushing them onto the execution
2122  // stack.
2123  Label loop;
2124  // length is a small non-negative integer, due to the test above.
2125  __ test(length, Operand(length));
2126  __ j(zero, &invoke);
2127  __ bind(&loop);
2128  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
2129  __ dec(length);
2130  __ j(not_zero, &loop);
2131
2132  // Invoke the function.
2133  __ bind(&invoke);
2134  ASSERT(receiver.is(eax));
2135  v8::internal::ParameterCount actual(eax);
2136  SafepointGenerator safepoint_generator(this,
2137                                         instr->pointer_map(),
2138                                         Safepoint::kNoDeoptimizationIndex);
2139  __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
2140}
2141
2142
2143void LCodeGen::DoPushArgument(LPushArgument* instr) {
2144  LOperand* argument = instr->InputAt(0);
2145  if (argument->IsConstantOperand()) {
2146    __ push(ToImmediate(argument));
2147  } else {
2148    __ push(ToOperand(argument));
2149  }
2150}
2151
2152
2153void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2154  Register result = ToRegister(instr->result());
2155  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
2156}
2157
2158
2159void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2160  Register result = ToRegister(instr->result());
2161  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
2162  __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
2163}
2164
2165
2166void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2167                                 int arity,
2168                                 LInstruction* instr) {
2169  // Change context if needed.
2170  bool change_context =
2171      (graph()->info()->closure()->context() != function->context()) ||
2172      scope()->contains_with() ||
2173      (scope()->num_heap_slots() > 0);
2174  if (change_context) {
2175    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2176  }
2177
2178  // Set eax to arguments count if adaptation is not needed. Assumes that eax
2179  // is available to write to at this point.
2180  if (!function->NeedsArgumentsAdaption()) {
2181    __ mov(eax, arity);
2182  }
2183
2184  LPointerMap* pointers = instr->pointer_map();
2185  RecordPosition(pointers->position());
2186
2187  // Invoke function.
2188  if (*function == *graph()->info()->closure()) {
2189    __ CallSelf();
2190  } else {
2191    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
2192  }
2193
2194  // Set up deoptimization.
2195  RegisterLazyDeoptimization(instr);
2196
2197  // Restore context.
2198  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2199}
2200
2201
2202void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2203  ASSERT(ToRegister(instr->result()).is(eax));
2204  __ mov(edi, instr->function());
2205  CallKnownFunction(instr->function(), instr->arity(), instr);
2206}
2207
2208
2209void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2210  Register input_reg = ToRegister(instr->InputAt(0));
2211  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2212         Factory::heap_number_map());
2213  DeoptimizeIf(not_equal, instr->environment());
2214
2215  Label done;
2216  Register tmp = input_reg.is(eax) ? ecx : eax;
2217  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
2218
2219  // Preserve the value of all registers.
2220  __ PushSafepointRegisters();
2221
2222  Label negative;
2223  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2224  // Check the sign of the argument. If the argument is positive,
2225  // just return it.
2226  __ test(tmp, Immediate(HeapNumber::kSignMask));
2227  __ j(not_zero, &negative);
2228  __ mov(tmp, input_reg);
2229  __ jmp(&done);
2230
2231  __ bind(&negative);
2232
2233  Label allocated, slow;
2234  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
2235  __ jmp(&allocated);
2236
2237  // Slow case: Call the runtime system to do the number allocation.
2238  __ bind(&slow);
2239
2240  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2241  RecordSafepointWithRegisters(
2242      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2243  // Set the pointer to the new heap number in tmp.
2244  if (!tmp.is(eax)) __ mov(tmp, eax);
2245
2246  // Restore input_reg after call to runtime.
2247  __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize));
2248
2249  __ bind(&allocated);
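  // Copy the input's bits into the new heap number, clearing the sign bit
  // of the exponent word to produce the absolute value.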
2250  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2251  __ and_(tmp2, ~HeapNumber::kSignMask);
2252  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
2253  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
2254  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
2255
2256  __ bind(&done);
2257  __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
2258
2259  __ PopSafepointRegisters();
2260}
2261
2262
2263void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
2264  // Class for deferred case.
2265  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
2266   public:
2267    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
2268                                    LUnaryMathOperation* instr)
2269        : LDeferredCode(codegen), instr_(instr) { }
2270    virtual void Generate() {
2271      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
2272    }
2273   private:
2274    LUnaryMathOperation* instr_;
2275  };
2276
2277  ASSERT(instr->InputAt(0)->Equals(instr->result()));
2278  Representation r = instr->hydrogen()->value()->representation();
2279
2280  if (r.IsDouble()) {
2281    XMMRegister scratch = xmm0;
2282    XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
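    // For ordinary numbers, 0.0 - x differs from x only in the sign bit, so
    // AND-ing the two values clears the sign and leaves |x| in input_reg.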
2283    __ pxor(scratch, scratch);
2284    __ subsd(scratch, input_reg);
2285    __ pand(input_reg, scratch);
2286  } else if (r.IsInteger32()) {
2287    Register input_reg = ToRegister(instr->InputAt(0));
2288    __ test(input_reg, Operand(input_reg));
2289    Label is_positive;
2290    __ j(not_sign, &is_positive);
2291    __ neg(input_reg);
2292    __ test(input_reg, Operand(input_reg));
2293    DeoptimizeIf(negative, instr->environment());
2294    __ bind(&is_positive);
2295  } else {  // Tagged case.
2296    DeferredMathAbsTaggedHeapNumber* deferred =
2297        new DeferredMathAbsTaggedHeapNumber(this, instr);
2298    Label not_smi;
2299    Register input_reg = ToRegister(instr->InputAt(0));
2300    // Smi check.
2301    __ test(input_reg, Immediate(kSmiTagMask));
2302    __ j(not_zero, deferred->entry());
2303    __ test(input_reg, Operand(input_reg));
2304    Label is_positive;
2305    __ j(not_sign, &is_positive);
2306    __ neg(input_reg);
2307
2308    __ test(input_reg, Operand(input_reg));
2309    DeoptimizeIf(negative, instr->environment());
2310
2311    __ bind(&is_positive);
2312    __ bind(deferred->exit());
2313  }
2314}
2315
2316
2317void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
2318  XMMRegister xmm_scratch = xmm0;
2319  Register output_reg = ToRegister(instr->result());
2320  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
2321  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
2322  __ ucomisd(input_reg, xmm_scratch);
2323
2324  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2325    DeoptimizeIf(below_equal, instr->environment());
2326  } else {
2327    DeoptimizeIf(below, instr->environment());
2328  }
2329
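  // Negative inputs must deoptimize: cvttsd2si truncates toward zero, which
  // matches Math.floor only for non-negative values. ucomisd also sets the
  // flags for unordered operands, so NaN inputs deoptimize as well.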
2330  // Use truncating instruction (OK because input is positive).
2331  __ cvttsd2si(output_reg, Operand(input_reg));
2332
2333  // Overflow is signalled with minint.
2334  __ cmp(output_reg, 0x80000000u);
2335  DeoptimizeIf(equal, instr->environment());
2336}
2337
2338
2339void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
2340  XMMRegister xmm_scratch = xmm0;
2341  Register output_reg = ToRegister(instr->result());
2342  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
2343
2344  // xmm_scratch = 0.5
2345  ExternalReference one_half = ExternalReference::address_of_one_half();
2346  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
2347
2348  // input = input + 0.5
2349  __ addsd(input_reg, xmm_scratch);
2350
2351  // Inputs in [-0.5, 0) require a -0 result, which an int32 cannot
2352  // represent, so deoptimize; otherwise compute Math.floor(value + 0.5).
2353  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2354    __ ucomisd(input_reg, xmm_scratch);
2355    DeoptimizeIf(below_equal, instr->environment());
2356  } else {
2357    // If we don't need to bail out on -0, bail out only when the sum
2358    // value + 0.5 is still negative.
2359    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
2360    __ ucomisd(input_reg, xmm_scratch);
2361    DeoptimizeIf(below, instr->environment());
2362  }
2363
2364  // Compute Math.floor(value + 0.5).
2365  // Use truncating instruction (OK because input is positive).
2366  __ cvttsd2si(output_reg, Operand(input_reg));
2367
2368  // Overflow is signalled with minint.
2369  __ cmp(output_reg, 0x80000000u);
2370  DeoptimizeIf(equal, instr->environment());
2371}
2372
2373
2374void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
2375  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
2376  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
2377  __ sqrtsd(input_reg, input_reg);
2378}
2379
2380
2381void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
2382  XMMRegister xmm_scratch = xmm0;
2383  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
2384  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
2385  ExternalReference negative_infinity =
2386      ExternalReference::address_of_negative_infinity();
2387  __ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
2388  __ ucomisd(xmm_scratch, input_reg);
2389  DeoptimizeIf(equal, instr->environment());
2390  __ sqrtsd(input_reg, input_reg);
2391}
2392
2393
2394void LCodeGen::DoPower(LPower* instr) {
2395  LOperand* left = instr->InputAt(0);
2396  LOperand* right = instr->InputAt(1);
2397  DoubleRegister result_reg = ToDoubleRegister(instr->result());
2398  Representation exponent_type = instr->hydrogen()->right()->representation();
2399  if (exponent_type.IsDouble()) {
2400    // It is safe to use ebx directly since the instruction is marked
2401    // as a call.
2402    __ PrepareCallCFunction(4, ebx);
2403    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2404    __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
2405    __ CallCFunction(ExternalReference::power_double_double_function(), 4);
2406  } else if (exponent_type.IsInteger32()) {
2407    // It is safe to use ebx directly since the instruction is marked
2408    // as a call.
2409    ASSERT(!ToRegister(right).is(ebx));
2410    __ PrepareCallCFunction(4, ebx);
2411    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2412    __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
2413    __ CallCFunction(ExternalReference::power_double_int_function(), 4);
2414  } else {
2415    ASSERT(exponent_type.IsTagged());
2416    CpuFeatures::Scope scope(SSE2);
2417    Register right_reg = ToRegister(right);
2418
2419    Label non_smi, call;
2420    __ test(right_reg, Immediate(kSmiTagMask));
2421    __ j(not_zero, &non_smi);
2422    __ SmiUntag(right_reg);
2423    __ cvtsi2sd(result_reg, Operand(right_reg));
2424    __ jmp(&call);
2425
2426    __ bind(&non_smi);
2427    // It is safe to use ebx directly since the instruction is marked
2428    // as a call.
2429    ASSERT(!right_reg.is(ebx));
2430    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, ebx);
2431    DeoptimizeIf(not_equal, instr->environment());
2432    __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
2433
2434    __ bind(&call);
2435    __ PrepareCallCFunction(4, ebx);
2436    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2437    __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
2438    __ CallCFunction(ExternalReference::power_double_double_function(), 4);
2439  }
2440
2441  // Return value is in st(0) on ia32.
2442  // Store it into the (fixed) result register.
2443  __ sub(Operand(esp), Immediate(kDoubleSize));
2444  __ fstp_d(Operand(esp, 0));
2445  __ movdbl(result_reg, Operand(esp, 0));
2446  __ add(Operand(esp), Immediate(kDoubleSize));
2447}
2448
2449
2450void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
2451  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
2452  TranscendentalCacheStub stub(TranscendentalCache::LOG,
2453                               TranscendentalCacheStub::UNTAGGED);
2454  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2455}
2456
2457
2458void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
2459  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
2460  TranscendentalCacheStub stub(TranscendentalCache::COS,
2461                               TranscendentalCacheStub::UNTAGGED);
2462  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2463}
2464
2465
2466void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
2467  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
2468  TranscendentalCacheStub stub(TranscendentalCache::SIN,
2469                               TranscendentalCacheStub::UNTAGGED);
2470  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2471}
2472
2473
2474void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
2475  switch (instr->op()) {
2476    case kMathAbs:
2477      DoMathAbs(instr);
2478      break;
2479    case kMathFloor:
2480      DoMathFloor(instr);
2481      break;
2482    case kMathRound:
2483      DoMathRound(instr);
2484      break;
2485    case kMathSqrt:
2486      DoMathSqrt(instr);
2487      break;
2488    case kMathPowHalf:
2489      DoMathPowHalf(instr);
2490      break;
2491    case kMathCos:
2492      DoMathCos(instr);
2493      break;
2494    case kMathSin:
2495      DoMathSin(instr);
2496      break;
2497    case kMathLog:
2498      DoMathLog(instr);
2499      break;
2500
2501    default:
2502      UNREACHABLE();
2503  }
2504}
2505
2506
2507void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
2508  ASSERT(ToRegister(instr->result()).is(eax));
2509  ASSERT(ToRegister(instr->InputAt(0)).is(ecx));
2510
2511  int arity = instr->arity();
2512  Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
2513  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2514  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2515}
2516
2517
2518void LCodeGen::DoCallNamed(LCallNamed* instr) {
2519  ASSERT(ToRegister(instr->result()).is(eax));
2520
2521  int arity = instr->arity();
2522  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
2523  __ mov(ecx, instr->name());
2524  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2525  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2526}
2527
2528
2529void LCodeGen::DoCallFunction(LCallFunction* instr) {
2530  ASSERT(ToRegister(instr->result()).is(eax));
2531
2532  int arity = instr->arity();
2533  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
2534  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2535  __ Drop(1);
2536  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2537}
2538
2539
2540void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
2541  ASSERT(ToRegister(instr->result()).is(eax));
2542
2543  int arity = instr->arity();
2544  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
2545  __ mov(ecx, instr->name());
2546  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2547  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2548}
2549
2550
2551void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
2552  ASSERT(ToRegister(instr->result()).is(eax));
2553  __ mov(edi, instr->target());
2554  CallKnownFunction(instr->target(), instr->arity(), instr);
2555}
2556
2557
2558void LCodeGen::DoCallNew(LCallNew* instr) {
2559  ASSERT(ToRegister(instr->InputAt(0)).is(edi));
2560  ASSERT(ToRegister(instr->result()).is(eax));
2561
2562  Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
2563  __ Set(eax, Immediate(instr->arity()));
2564  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
2565}
2566
2567
2568void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2569  CallRuntime(instr->function(), instr->arity(), instr);
2570}
2571
2572
2573void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
2574  Register object = ToRegister(instr->object());
2575  Register value = ToRegister(instr->value());
2576  int offset = instr->offset();
2577
2578  if (!instr->transition().is_null()) {
2579    __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
2580  }
2581
2582  // Do the store.
2583  if (instr->is_in_object()) {
2584    __ mov(FieldOperand(object, offset), value);
2585    if (instr->needs_write_barrier()) {
2586      Register temp = ToRegister(instr->TempAt(0));
2587      // Update the write barrier for the object for in-object properties.
2588      __ RecordWrite(object, offset, value, temp);
2589    }
2590  } else {
2591    Register temp = ToRegister(instr->TempAt(0));
2592    __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
2593    __ mov(FieldOperand(temp, offset), value);
2594    if (instr->needs_write_barrier()) {
2595      // Update the write barrier for the properties array.
2596      // object is used as a scratch register.
2597      __ RecordWrite(temp, offset, value, object);
2598    }
2599  }
2600}
2601
2602
2603void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
2604  ASSERT(ToRegister(instr->object()).is(edx));
2605  ASSERT(ToRegister(instr->value()).is(eax));
2606
2607  __ mov(ecx, instr->name());
2608  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
2609  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2610}
2611
2612
2613void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
2614  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
2615  DeoptimizeIf(above_equal, instr->environment());
2616}
2617
2618
2619void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
2620  Register value = ToRegister(instr->value());
2621  Register elements = ToRegister(instr->object());
2622  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
2623
2624  // Do the store.
2625  if (instr->key()->IsConstantOperand()) {
2626    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
2627    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2628    int offset =
2629        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
2630    __ mov(FieldOperand(elements, offset), value);
2631  } else {
2632    __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
2633           value);
2634  }
2635
2636  if (instr->hydrogen()->NeedsWriteBarrier()) {
2637    // Compute address of modified element and store it into key register.
2638    __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
2639    __ RecordWrite(elements, key, value);
2640  }
2641}
2642
2643
2644void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
2645  ASSERT(ToRegister(instr->object()).is(edx));
2646  ASSERT(ToRegister(instr->key()).is(ecx));
2647  ASSERT(ToRegister(instr->value()).is(eax));
2648
2649  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
2650  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2651}
2652
2653
2654void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
2655  LOperand* input = instr->InputAt(0);
2656  ASSERT(input->IsRegister() || input->IsStackSlot());
2657  LOperand* output = instr->result();
2658  ASSERT(output->IsDoubleRegister());
2659  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
2660}
2661
2662
2663void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
2664  class DeferredNumberTagI: public LDeferredCode {
2665   public:
2666    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
2667        : LDeferredCode(codegen), instr_(instr) { }
2668    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
2669   private:
2670    LNumberTagI* instr_;
2671  };
2672
2673  LOperand* input = instr->InputAt(0);
2674  ASSERT(input->IsRegister() && input->Equals(instr->result()));
2675  Register reg = ToRegister(input);
2676
2677  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
2678  __ SmiTag(reg);
2679  __ j(overflow, deferred->entry());
2680  __ bind(deferred->exit());
2681}
2682
2683
2684void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
2685  Label slow;
2686  Register reg = ToRegister(instr->InputAt(0));
2687  Register tmp = reg.is(eax) ? ecx : eax;
2688
2689  // Preserve the value of all registers.
2690  __ PushSafepointRegisters();
2691
2692  // There was overflow, so bits 30 and 31 of the original integer
2693  // disagree. Try to allocate a heap number in new space and store
2694  // the value in there. If that fails, call the runtime system.
2695  NearLabel done;
2696  __ SmiUntag(reg);
2697  __ xor_(reg, 0x80000000);
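  // SmiUntag is an arithmetic shift right, so bit 31 now holds bit 30 of the
  // original value; the overflow means bits 30 and 31 disagreed, and flipping
  // bit 31 therefore recovers the original integer.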
2698  __ cvtsi2sd(xmm0, Operand(reg));
2699  if (FLAG_inline_new) {
2700    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
2701    __ jmp(&done);
2702  }
2703
2704  // Slow case: Call the runtime system to do the number allocation.
2705  __ bind(&slow);
2706
2707  // TODO(3095996): Put a valid pointer value in the stack slot where the result
2708  // register is stored, as this register is in the pointer map, but contains an
2709  // integer value.
2710  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
2711
2712  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2713  RecordSafepointWithRegisters(
2714      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2715  if (!reg.is(eax)) __ mov(reg, eax);
2716
2717  // Done. Store the value in xmm0 into the value field of the allocated
2718  // heap number.
2719  __ bind(&done);
2720  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
2721  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg);
2722  __ PopSafepointRegisters();
2723}
2724
2725
2726void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
2727  class DeferredNumberTagD: public LDeferredCode {
2728   public:
2729    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
2730        : LDeferredCode(codegen), instr_(instr) { }
2731    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
2732   private:
2733    LNumberTagD* instr_;
2734  };
2735
2736  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
2737  Register reg = ToRegister(instr->result());
2738  Register tmp = ToRegister(instr->TempAt(0));
2739
2740  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
2741  if (FLAG_inline_new) {
2742    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
2743  } else {
2744    __ jmp(deferred->entry());
2745  }
2746  __ bind(deferred->exit());
2747  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
2748}
2749
2750
2751void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
2752  // TODO(3095996): Get rid of this. For now, we need to make the
2753  // result register contain a valid pointer because it is already
2754  // contained in the register pointer map.
2755  Register reg = ToRegister(instr->result());
2756  __ Set(reg, Immediate(0));
2757
2758  __ PushSafepointRegisters();
2759  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2760  RecordSafepointWithRegisters(
2761      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2762  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax);
2763  __ PopSafepointRegisters();
2764}
2765
2766
2767void LCodeGen::DoSmiTag(LSmiTag* instr) {
2768  LOperand* input = instr->InputAt(0);
2769  ASSERT(input->IsRegister() && input->Equals(instr->result()));
2770  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
2771  __ SmiTag(ToRegister(input));
2772}
2773
2774
2775void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
2776  LOperand* input = instr->InputAt(0);
2777  ASSERT(input->IsRegister() && input->Equals(instr->result()));
2778  if (instr->needs_check()) {
2779    __ test(ToRegister(input), Immediate(kSmiTagMask));
2780    DeoptimizeIf(not_zero, instr->environment());
2781  }
2782  __ SmiUntag(ToRegister(input));
2783}
2784
2785
2786void LCodeGen::EmitNumberUntagD(Register input_reg,
2787                                XMMRegister result_reg,
2788                                LEnvironment* env) {
2789  NearLabel load_smi, heap_number, done;
2790
2791  // Smi check.
2792  __ test(input_reg, Immediate(kSmiTagMask));
2793  __ j(zero, &load_smi, not_taken);
2794
2795  // Heap number map check.
2796  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2797         Factory::heap_number_map());
2798  __ j(equal, &heap_number);
2799
2800  __ cmp(input_reg, Factory::undefined_value());
2801  DeoptimizeIf(not_equal, env);
2802
2803  // Convert undefined to NaN.
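  // Briefly clobber input_reg with the canonical NaN object to load its
  // value, then restore the register from the stack.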
2804  __ push(input_reg);
2805  __ mov(input_reg, Factory::nan_value());
2806  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
2807  __ pop(input_reg);
2808  __ jmp(&done);
2809
2810  // Heap number to XMM conversion.
2811  __ bind(&heap_number);
2812  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
2813  __ jmp(&done);
2814
2815  // Smi to XMM conversion.
2816  __ bind(&load_smi);
2817  __ SmiUntag(input_reg);  // Untag smi before converting to float.
2818  __ cvtsi2sd(result_reg, Operand(input_reg));
2819  __ SmiTag(input_reg);  // Retag smi.
2820  __ bind(&done);
2821}
2822
2823
2824class DeferredTaggedToI: public LDeferredCode {
2825 public:
2826  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
2827      : LDeferredCode(codegen), instr_(instr) { }
2828  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
2829 private:
2830  LTaggedToI* instr_;
2831};
2832
2833
2834void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
2835  NearLabel done, heap_number;
2836  Register input_reg = ToRegister(instr->InputAt(0));
2837
2838  // Heap number map check.
2839  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2840         Factory::heap_number_map());
2841
2842  if (instr->truncating()) {
2843    __ j(equal, &heap_number);
2844    // Check for undefined. Undefined is converted to zero for truncating
2845    // conversions.
2846    __ cmp(input_reg, Factory::undefined_value());
2847    DeoptimizeIf(not_equal, instr->environment());
2848    __ mov(input_reg, 0);
2849    __ jmp(&done);
2850
2851    __ bind(&heap_number);
2852    if (CpuFeatures::IsSupported(SSE3)) {
2853      CpuFeatures::Scope scope(SSE3);
2854      NearLabel convert;
2855      // Use more powerful conversion when sse3 is available.
2856      // Load x87 register with heap number.
2857      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
2858      // Get exponent alone and check for too-big exponent.
2859      __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2860      __ and_(input_reg, HeapNumber::kExponentMask);
2861      const uint32_t kTooBigExponent =
2862          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
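      // An unbiased exponent of 63 or more means the magnitude is at least
      // 2^63, which (except for the minimal int64 value itself) does not fit
      // a signed 64-bit integer, so the fisttp conversion is not attempted.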
2863      __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
2864      __ j(less, &convert);
2865      // Pop FPU stack before deoptimizing.
2866      __ ffree(0);
2867      __ fincstp();
2868      DeoptimizeIf(no_condition, instr->environment());
2869
2870      // Reserve space for 64 bit answer.
2871      __ bind(&convert);
2872      __ sub(Operand(esp), Immediate(kDoubleSize));
2873      // Do conversion, which cannot fail because we checked the exponent.
2874      __ fisttp_d(Operand(esp, 0));
2875      __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
2876      __ add(Operand(esp), Immediate(kDoubleSize));
2877    } else {
2878      NearLabel deopt;
2879      XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
2880      __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
2881      __ cvttsd2si(input_reg, Operand(xmm0));
2882      __ cmp(input_reg, 0x80000000u);
2883      __ j(not_equal, &done);
2884      // Check if the input was 0x80000000 (kMinInt).
2885      // If not, the conversion overflowed and we deoptimize.
2886      ExternalReference min_int = ExternalReference::address_of_min_int();
2887      __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
2888      __ ucomisd(xmm_temp, xmm0);
2889      DeoptimizeIf(not_equal, instr->environment());
2890      DeoptimizeIf(parity_even, instr->environment());  // NaN.
2891    }
2892  } else {
2893    // Deoptimize if we don't have a heap number.
2894    DeoptimizeIf(not_equal, instr->environment());
2895
2896    XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
2897    __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
2898    __ cvttsd2si(input_reg, Operand(xmm0));
2899    __ cvtsi2sd(xmm_temp, Operand(input_reg));
2900    __ ucomisd(xmm0, xmm_temp);
2901    DeoptimizeIf(not_equal, instr->environment());
2902    DeoptimizeIf(parity_even, instr->environment());  // NaN.
2903    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2904      __ test(input_reg, Operand(input_reg));
2905      __ j(not_zero, &done);
2906      __ movmskpd(input_reg, xmm0);
2907      __ and_(input_reg, 1);
2908      DeoptimizeIf(not_zero, instr->environment());
2909    }
2910  }
2911  __ bind(&done);
2912}
2913
2914
2915void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
2916  LOperand* input = instr->InputAt(0);
2917  ASSERT(input->IsRegister());
2918  ASSERT(input->Equals(instr->result()));
2919
2920  Register input_reg = ToRegister(input);
2921
2922  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
2923
2924  // Smi check.
2925  __ test(input_reg, Immediate(kSmiTagMask));
2926  __ j(not_zero, deferred->entry());
2927
2928  // Smi to int32 conversion.
2929  __ SmiUntag(input_reg);  // Untag smi.
2930
2931  __ bind(deferred->exit());
2932}
2933
2934
2935void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
2936  LOperand* input = instr->InputAt(0);
2937  ASSERT(input->IsRegister());
2938  LOperand* result = instr->result();
2939  ASSERT(result->IsDoubleRegister());
2940
2941  Register input_reg = ToRegister(input);
2942  XMMRegister result_reg = ToDoubleRegister(result);
2943
2944  EmitNumberUntagD(input_reg, result_reg, instr->environment());
2945}
2946
2947
2948void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
2949  LOperand* input = instr->InputAt(0);
2950  ASSERT(input->IsDoubleRegister());
2951  LOperand* result = instr->result();
2952  ASSERT(result->IsRegister());
2953
2954  XMMRegister input_reg = ToDoubleRegister(input);
2955  Register result_reg = ToRegister(result);
2956
2957  if (instr->truncating()) {
2958    // Performs a truncating conversion of a floating point number as used by
2959    // the JS bitwise operations.
2960    __ cvttsd2si(result_reg, Operand(input_reg));
2961    __ cmp(result_reg, 0x80000000u);
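    // cvttsd2si signals failure by returning kMinInt (0x80000000); any other
    // result means the truncation succeeded.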
2962    if (CpuFeatures::IsSupported(SSE3)) {
2963      // This will deoptimize if the exponent of the input is out of range.
2964      CpuFeatures::Scope scope(SSE3);
2965      NearLabel convert, done;
2966      __ j(not_equal, &done);
2967      __ sub(Operand(esp), Immediate(kDoubleSize));
2968      __ movdbl(Operand(esp, 0), input_reg);
2969      // Get exponent alone and check for too-big exponent.
2970      __ mov(result_reg, Operand(esp, sizeof(int32_t)));
2971      __ and_(result_reg, HeapNumber::kExponentMask);
2972      const uint32_t kTooBigExponent =
2973          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
2974      __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
2975      __ j(less, &convert);
2976      __ add(Operand(esp), Immediate(kDoubleSize));
2977      DeoptimizeIf(no_condition, instr->environment());
2978      __ bind(&convert);
2979      // Do conversion, which cannot fail because we checked the exponent.
2980      __ fld_d(Operand(esp, 0));
2981      __ fisttp_d(Operand(esp, 0));
2982      __ mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
2983      __ add(Operand(esp), Immediate(kDoubleSize));
2984      __ bind(&done);
2985    } else {
2986      NearLabel done;
2987      Register temp_reg = ToRegister(instr->TempAt(0));
2988      XMMRegister xmm_scratch = xmm0;
2989
2990      // If cvttsd2si succeeded, we're done. Otherwise, we attempt
2991      // manual conversion.
2992      __ j(not_equal, &done);
2993
2994      // Get high 32 bits of the input in result_reg and temp_reg.
2995      __ pshufd(xmm_scratch, input_reg, 1);
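      // Shuffle immediate 1 moves lane 1 (the high 32 bits of the double)
      // into the low lane of xmm_scratch.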
2996      __ movd(Operand(temp_reg), xmm_scratch);
2997      __ mov(result_reg, temp_reg);
2998
2999      // Prepare negation mask in temp_reg.
3000      __ sar(temp_reg, kBitsPerInt - 1);
3001
3002      // Extract the exponent from result_reg and subtract adjusted
3003      // bias from it. The adjustment is selected in a way such that
3004      // when the difference is zero, the answer is in the low 32 bits
3005      // of the input, otherwise a shift has to be performed.
3006      __ shr(result_reg, HeapNumber::kExponentShift);
3007      __ and_(result_reg,
3008              HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
3009      __ sub(Operand(result_reg),
3010             Immediate(HeapNumber::kExponentBias +
3011                       HeapNumber::kExponentBits +
3012                       HeapNumber::kMantissaBits));
3013      // Don't handle exponents bigger than kMantissaBits + kExponentBits
3014      // (== 63) or special exponents.
3015      DeoptimizeIf(greater, instr->environment());
3016
3017      // Zero out the sign and the exponent in the input (by shifting
3018      // it to the left) and restore the implicit mantissa bit,
3019      // i.e. convert the input to unsigned int64 shifted left by
3020      // kExponentBits.
3021      ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
3022      // Minus zero has the most significant bit set and the other
3023      // bits cleared.
3024      __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
3025      __ psllq(input_reg, HeapNumber::kExponentBits);
3026      __ por(input_reg, xmm_scratch);
3027
3028      // Get the amount to shift the input right in xmm_scratch.
3029      __ neg(result_reg);
3030      __ movd(xmm_scratch, Operand(result_reg));
3031
3032      // Shift the input right and extract low 32 bits.
3033      __ psrlq(input_reg, xmm_scratch);
3034      __ movd(Operand(result_reg), input_reg);
3035
3036      // Use the prepared mask in temp_reg to negate the result if necessary.
3037      __ xor_(result_reg, Operand(temp_reg));
3038      __ sub(result_reg, Operand(temp_reg));
3039      __ bind(&done);
3040    }
3041  } else {
3042    NearLabel done;
3043    __ cvttsd2si(result_reg, Operand(input_reg));
3044    __ cvtsi2sd(xmm0, Operand(result_reg));
3045    __ ucomisd(xmm0, input_reg);
3046    DeoptimizeIf(not_equal, instr->environment());
3047    DeoptimizeIf(parity_even, instr->environment());  // NaN.
3048    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3049      // The integer converted back is equal to the original. We
3050      // only have to test if we got -0 as an input.
3051      __ test(result_reg, Operand(result_reg));
3052      __ j(not_zero, &done);
3053      __ movmskpd(result_reg, input_reg);
3054      // Bit 0 contains the sign of the double in input_reg.
3055      // If input was positive, we are ok and return 0, otherwise
3056      // deoptimize.
3057      __ and_(result_reg, 1);
3058      DeoptimizeIf(not_zero, instr->environment());
3059    }
3060    __ bind(&done);
3061  }
3062}
3063
3064
3065void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
3066  LOperand* input = instr->InputAt(0);
3067  ASSERT(input->IsRegister());
3068  __ test(ToRegister(input), Immediate(kSmiTagMask));
3069  DeoptimizeIf(instr->condition(), instr->environment());
3070}
3071
3072
3073void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
3074  Register input = ToRegister(instr->InputAt(0));
3075  Register temp = ToRegister(instr->TempAt(0));
3076  InstanceType first = instr->hydrogen()->first();
3077  InstanceType last = instr->hydrogen()->last();
3078
3079  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
3080  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
3081          static_cast<int8_t>(first));
3082
3083  // If there is only one type in the interval, check for equality.
3084  if (first == last) {
3085    DeoptimizeIf(not_equal, instr->environment());
3086  } else {
3087    DeoptimizeIf(below, instr->environment());
3088    // Omit check for the last type.
3089    if (last != LAST_TYPE) {
3090      __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
3091              static_cast<int8_t>(last));
3092      DeoptimizeIf(above, instr->environment());
3093    }
3094  }
3095}
3096
3097
3098void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
3099  ASSERT(instr->InputAt(0)->IsRegister());
3100  Register reg = ToRegister(instr->InputAt(0));
3101  __ cmp(reg, instr->hydrogen()->target());
3102  DeoptimizeIf(not_equal, instr->environment());
3103}
3104
3105
3106void LCodeGen::DoCheckMap(LCheckMap* instr) {
3107  LOperand* input = instr->InputAt(0);
3108  ASSERT(input->IsRegister());
3109  Register reg = ToRegister(input);
3110  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
3111         instr->hydrogen()->map());
3112  DeoptimizeIf(not_equal, instr->environment());
3113}
3114
3115
3116void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
3117  if (Heap::InNewSpace(*object)) {
3118    Handle<JSGlobalPropertyCell> cell =
3119        Factory::NewJSGlobalPropertyCell(object);
3120    __ mov(result, Operand::Cell(cell));
3121  } else {
3122    __ mov(result, object);
3123  }
3124}
3125
3126
3127void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
3128  Register reg = ToRegister(instr->TempAt(0));
3129
  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  LoadHeapObject(reg, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
           Handle<Map>(current_prototype->map()));
    DeoptimizeIf(not_equal, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    LoadHeapObject(reg, current_prototype);
  }

  // Check the holder map.
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
         Handle<Map>(current_prototype->map()));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  // Set up the parameters to the stub/runtime call.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_elements()));

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
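  // Copy-on-write arrays are cloned by the stub in copy-on-write mode;
  // nested literals (depth > 1) and over-long shallow arrays go through
  // the runtime; everything else is cloned by the stub in plain mode.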
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  // Set up the parameters to the runtime call.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_properties()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));

  // Pick the right runtime function to call.
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  }
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  NearLabel materialized;
  // Registers will be used as follows:
  // edi = JS function.
  // ecx = literals array.
  // ebx = regexp literal.
  // eax = regexp literal clone.
  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
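  // Compute the offset of this literal's slot in the literals array and
  // check whether the literal has already been materialized; a slot that
  // still holds undefined means it has not.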
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  __ cmp(ebx, Factory::undefined_value());
  __ j(not_equal, &materialized);

  // Create the regexp literal by calling the runtime function.
  // The result will be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
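  // If the object size is an odd number of words, copy the remaining word
  // that the two-word loop above did not cover.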
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need their literals cloned.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (shared_info->num_literals() == 0 && !pretenure) {
    FastNewClosureStub stub;
    __ push(Immediate(shared_info));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
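    // Runtime::kNewClosure takes the context, the shared function info,
    // and the pretenure flag as explicit arguments.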
    __ push(esi);
    __ push(Immediate(shared_info));
    __ push(Immediate(pretenure
                      ? Factory::true_value()
                      : Factory::false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->InputAt(0);
  if (input->IsConstantOperand()) {
    __ push(ToImmediate(input));
  } else {
    __ push(ToOperand(input));
  }
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Label true_label;
  Label false_label;
  NearLabel done;

  Condition final_branch_condition = EmitTypeofIs(&true_label,
                                                  &false_label,
                                                  input,
                                                  instr->type_literal());
  __ j(final_branch_condition, &true_label);
  __ bind(&false_label);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ jmp(&done);

  __ bind(&true_label);
  __ mov(result, Handle<Object>(Heap::true_value()));

  __ bind(&done);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal());

  EmitBranch(true_block, false_block, final_branch_condition);
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
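  // Emit the typeof check for type_name. The generated code may jump
  // directly to true_label or false_label; otherwise it falls through and
  // the caller branches to true_label on the returned condition.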
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(Heap::number_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, true_label);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           Factory::heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::string_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(not_zero, false_label);
    __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
    final_branch_condition = below;

  } else if (type_name->Equals(Heap::boolean_symbol())) {
    __ cmp(input, Handle<Object>(Heap::true_value()));
    __ j(equal, true_label);
    __ cmp(input, Handle<Object>(Heap::false_value()));
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::undefined_symbol())) {
    __ cmp(input, Factory::undefined_value());
    __ j(equal, true_label);
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(Heap::function_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    // Regular expressions => 'function' (they are callable).
    __ CmpInstanceType(input, JS_REGEXP_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::object_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    __ cmp(input, Factory::null_value());
    __ j(equal, true_label);
    // Regular expressions => 'function', not 'object'.
    __ CmpObjectType(input, JS_REGEXP_TYPE, input);
    __ j(equal, false_label);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(not_zero, false_label);
    // Check for JS objects => true.
    __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
    __ j(below, false_label);
    __ CmpInstanceType(input, LAST_JS_OBJECT_TYPE);
    final_branch_condition = below_equal;

  } else {
    final_branch_condition = not_equal;
    __ jmp(false_label);
    // A dead branch instruction will be generated after this point.
  }

  return final_branch_condition;
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  // A lazy bailout emits no code of its own; it only captures the
  // environment after a call so the safepoint data can be populated with
  // deoptimization data.
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  __ push(ToOperand(obj));
  if (key->IsConstantOperand()) {
    __ push(ToImmediate(key));
  } else {
    __ push(ToOperand(key));
  }
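  // Record the source position before the call, and let the safepoint
  // generator record a safepoint right after the builtin call returns so
  // the pointer map is associated with the call's return address.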
  RecordPosition(instr->pointer_map()->position());
  SafepointGenerator safepoint_generator(this,
                                         instr->pointer_map(),
                                         Safepoint::kNoDeoptimizationIndex);
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  // Perform stack overflow check.
  NearLabel done;
  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
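  // The stack grows downward, so esp at or above the limit means there is
  // still room; otherwise call the StackCheckStub, which enters the
  // runtime to handle interrupts and possible stack overflow.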
  __ cmp(esp, Operand::StaticVariable(stack_limit));
  __ j(above_equal, &done);

  StackCheckStub stub;
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&done);
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32