// lithium-codegen-x64.cc, revision 7d3e7fc4b65010eabe860313ee0c64f50843f6e3
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     int deoptimization_index)
      : codegen_(codegen),
        pointers_(pointers),
        deoptimization_index_(deoptimization_index) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) {
    ASSERT(call_size >= 0);
    // Ensure that we have enough space after the previous safepoint position
    // for the jump generated there.
    int call_end = codegen_->masm()->pc_offset() + call_size;
    int prev_jump_end = codegen_->LastSafepointEnd() + kMinSafepointSize;
    if (call_end < prev_jump_end) {
      int padding_size = prev_jump_end - call_end;
      STATIC_ASSERT(kMinSafepointSize <= 9);  // One multibyte nop is enough.
      codegen_->masm()->nop(padding_size);
    }
  }

  virtual void AfterCall() {
    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
  }

 private:
  static const int kMinSafepointSize =
      MacroAssembler::kShortCallInstructionLength;
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  int deoptimization_index_;
};
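// Illustrative sketch of the padding above (not part of the original
// source): suppose the previous safepoint ended at pc offset 100 and
// kMinSafepointSize is 5. A 3-byte call starting at offset 101 would end
// at offset 104, inside the 5 bytes reserved for the jump patched in at
// offset 100, so BeforeCall emits nop(1) first and the call ends at 105.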


#define __ masm()->

bool LCodeGen::GenerateCode() {
  HPhase phase("Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(StackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  int length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  __ push(rbp);  // Caller's frame pointer.
  __ movq(rbp, rsp);
  __ push(rsi);  // Callee's context.
  __ push(rdi);  // Callee's JS function.
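  // After the four pushes above, the fixed part of the frame is laid out
  // as follows (illustrative summary, not part of the original source):
  //   [rbp + 8]  : return address
  //   [rbp + 0]  : caller's rbp
  //   [rbp - 8]  : context (rsi)
  //   [rbp - 16] : JS function (rdi)
  // The spill slots reserved below start at [rbp - 24].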

  // Reserve space for the stack slots needed by the code.
  int slots = StackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Set(rax, slots);
      __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
      Label loop;
      __ bind(&loop);
      __ push(kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
    } else {
      __ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      // On Windows, you may not access the stack more than one page below
      // the most recently mapped page. To make the allocated area randomly
      // accessible, we write to each page in turn (the value is irrelevant).
      const int kPageSize = 4 * KB;
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        __ movq(Operand(rsp, offset), rax);
      }
#endif
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in rdi.
    __ push(rdi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewContext, 1);
    }
    RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
    // Context is returned in both rax and rsi.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in rsi.
    __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Slot* slot = scope()->parameter(i)->AsSlot();
      if (slot != NULL && slot->type() == Slot::CONTEXT) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movq(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(slot->index());
        __ movq(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers all involved
        // registers, so we have to use a third register to avoid
        // clobbering rsi.
        __ movq(rcx, rsi);
        __ RecordWrite(rcx, context_offset, rax, rbx);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}


LInstruction* LCodeGen::GetNextInstruction() {
  if (current_instruction_ < instructions_->length() - 1) {
    return instructions_->at(current_instruction_ + 1);
  } else {
    return NULL;
  }
}


bool LCodeGen::GenerateJumpTable() {
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
    LDeferredCode* code = deferred_[i];
    __ bind(code->entry());
    code->Generate();
    __ jmp(code->exit());
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  // Ensure that there is space at the end of the code to write a number
  // of jump instructions, as well as room to write a call near the end
  // of the code.
  // The jumps are used when there isn't room in the code stream to write
  // a long call instruction. Instead a shorter call is written to a
  // jump instruction in the same code object.
  // The calls are used when lazy-deoptimizing a function; they call into
  // a deoptimization function.
  int short_deopts = safepoints_.CountShortDeoptimizationIntervals(
      static_cast<unsigned>(MacroAssembler::kJumpInstructionLength));
  int byte_count = short_deopts * MacroAssembler::kJumpInstructionLength;
  while (byte_count-- > 0) {
    __ int3();
  }
  safepoints_.Emit(masm(), StackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsInteger32();
}


bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsTagged();
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  Handle<Object> literal = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
  return literal;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return Operand(rbp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return Operand(rbp, -(index - 1) * kPointerSize);
  }
}
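// Worked example for the mapping above (illustrative, not part of the
// original source): spill slot index 0 maps to Operand(rbp, -24), just
// below the fixed frame sketched in GeneratePrologue, while incoming
// parameter index -1 maps to Operand(rbp, +16), just above the return
// address.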


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  int closure_id = DefineDeoptimizationLiteral(environment->closure());
  translation->BeginFrame(environment->ast_id(), closure_id, height);
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}
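// Illustrative note (not part of the original source): the recursion above
// emits outer frames first, so for an inlined call chain f -> g the
// translation describes f's frame before g's and the deoptimizer can
// rebuild frames outermost-first. An environment with 5 values of which
// 2 are parameters records a frame height of 3.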


void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed from the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = StackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ call(code, mode);
  RegisterLazyDeoptimization(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
}


void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
                                          SafepointMode safepoint_mode,
                                          int argc) {
  // Create the environment to bail out to. If the call has side effects,
  // execution has to continue after the call; otherwise execution can
  // resume from a previous bailout point, repeating the call.
  LEnvironment* deoptimization_environment;
  if (instr->HasDeoptimizationEnvironment()) {
    deoptimization_environment = instr->deoptimization_environment();
  } else {
    deoptimization_environment = instr->environment();
  }

  RegisterEnvironmentForDeoptimization(deoptimization_environment);
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    ASSERT(argc == 0);
    RecordSafepoint(instr->pointer_map(),
                    deoptimization_environment->deoptimization_index());
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(),
        argc,
        deoptimization_environment->deoptimization_index());
  }
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|
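    // Illustrative example (not part of the original comment): a frame
    // with 2 parameters, 1 local and 1 expression-stack value has an
    // environment of size 4 and a translation_size of 4; the translation
    // stores those same values with the parameters moved to the end,
    // after the 4 fixed words.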

    int frame_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
    }
    Translation translation(&translations_, frame_count);
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    environment->Register(deoptimization_index, translation.index());
    deoptimizations_.Add(environment);
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  ASSERT(entry != NULL);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  if (cc == no_condition) {
    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry) {
      jump_table_.Add(JumpTableEntry(entry));
    }
    __ j(cc, &jump_table_.last().label);
  }
}
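// Illustrative note (not part of the original source): each jump_table_
// entry becomes a bound label plus a jump to the deoptimization entry,
// emitted later by GenerateJumpTable(). Consecutive DeoptimizeIf calls
// targeting the same entry therefore share one table slot, and each
// conditional deopt costs only a short j(cc) in the main code stream.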


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  ASSERT(FLAG_deopt);
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal);
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    int deoptimization_index) {
  ASSERT(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->operands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
  if (kind & Safepoint::kWithRegisters) {
    // Register rsi always contains a pointer to the context.
    safepoint.DefinePointerRegister(rsi);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               int deoptimization_index) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}


void LCodeGen::RecordSafepoint(int deoptimization_index) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition);
  RecordSafepoint(&empty_pointers, deoptimization_index);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            int deoptimization_index) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
      deoptimization_index);
}


void LCodeGen::RecordPosition(int position) {
  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  LCodeGen::DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }

  LInstruction* next = GetNextInstruction();
  if (next != NULL && next->IsLazyBailout()) {
    int pc = masm()->pc_offset();
    safepoints_.SetPcAfterGap(pc);
  }
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}


void LCodeGen::DoModI(LModI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->InputAt(0));

    int32_t divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();

    if (divisor < 0) divisor = -divisor;

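    // The mask trick below computes dividend % divisor for a power-of-2
    // |divisor| by and-ing with (divisor - 1); a negative dividend is
    // negated before and after the mask so the result keeps the
    // dividend's sign. Worked example (illustrative, not part of the
    // original source): for -5 % 4, -5 becomes 5, 5 & 3 == 1, and the
    // final negation yields -1.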
    NearLabel positive_dividend, done;
    __ testl(dividend, dividend);
    __ j(not_sign, &positive_dividend);
    __ negl(dividend);
    __ andl(dividend, Immediate(divisor - 1));
    __ negl(dividend);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ j(not_zero, &done);
      DeoptimizeIf(no_condition, instr->environment());
    } else {
      __ jmp(&done);
    }
    __ bind(&positive_dividend);
    __ andl(dividend, Immediate(divisor - 1));
    __ bind(&done);
  } else {
    LOperand* right = instr->InputAt(1);
    Register right_reg = ToRegister(right);

    ASSERT(ToRegister(instr->result()).is(rdx));
    ASSERT(ToRegister(instr->InputAt(0)).is(rax));
    ASSERT(!right_reg.is(rax));
    ASSERT(!right_reg.is(rdx));

    // Check for x % 0.
    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
      __ testl(right_reg, right_reg);
      DeoptimizeIf(zero, instr->environment());
    }

    // Sign extend eax to edx.
    // (We are using only the low 32 bits of the values.)
    __ cdq();

    // Check for (0 % -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      NearLabel positive_left;
      NearLabel done;
      __ testl(rax, rax);
      __ j(not_sign, &positive_left);
      __ idivl(right_reg);

      // Test the remainder for 0, because then the result would be -0.
      __ testl(rdx, rdx);
      __ j(not_zero, &done);

      DeoptimizeIf(no_condition, instr->environment());
      __ bind(&positive_left);
      __ idivl(right_reg);
      __ bind(&done);
    } else {
      __ idivl(right_reg);
    }
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  LOperand* right = instr->InputAt(1);
  ASSERT(ToRegister(instr->result()).is(rax));
  ASSERT(ToRegister(instr->InputAt(0)).is(rax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));

  Register left_reg = rax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    NearLabel left_not_zero;
    __ testl(left_reg, left_reg);
    __ j(not_zero, &left_not_zero);
    __ testl(right_reg, right_reg);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    NearLabel left_not_min_int;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &left_not_min_int);
    __ cmpl(right_reg, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to rdx.
  __ cdq();
  __ idivl(right_reg);

  // Deoptimize if remainder is not 0.
  __ testl(rdx, rdx);
  DeoptimizeIf(not_zero, instr->environment());
}
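// Note (an explanatory assumption, not part of the original source): the
// final remainder check makes integer division exact; a division such as
// 7 / 2 deoptimizes rather than silently truncating, so the unoptimized
// code can produce the correct non-integer result.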


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->InputAt(0));
  LOperand* right = instr->InputAt(1);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ movl(kScratchRegister, left);
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known not to overflow, we can use
      // operations (such as lea and shifts) that don't set the
      // overflow flag correctly.
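      // For instance (illustrative, not part of the original comment),
      // the case right_value == 3 below emits
      //   lea left, [left + left*2]
      // which computes left * 3 in one instruction without a multiply.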
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    __ imull(left, ToOperand(right));
  } else {
    __ imull(left, ToRegister(right));
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    NearLabel done;
    __ testl(left, left);
    __ j(not_zero, &done);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else if (right->IsStackSlot()) {
      __ or_(kScratchRegister, ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(kScratchRegister, ToRegister(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        __ xorl(ToRegister(left), Immediate(right_operand));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xorl(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    ASSERT(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_XOR:
        __ xorl(ToRegister(left), ToRegister(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        } else {
          __ shrl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ shll(ToRegister(left), Immediate(shift_count));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
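// Note (an explanatory assumption, not part of the original source): the
// deopts on Token::SHR above cover the one case where x >>> s escapes the
// int32 range, namely a shift amount of 0 (mod 32) with a negative x. For
// example, -1 >>> 0 is 4294967295 in JavaScript, so a negative result
// here must bail out to a representation that can hold the unsigned value.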


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ subl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    __ subl(ToRegister(left), ToRegister(right));
  } else {
    __ subl(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
  // so if the constant is -0.0.
  if (int_val == 0) {
    __ xorpd(res, res);
  } else {
    Register tmp = ToRegister(instr->TempAt(0));
    __ Set(tmp, int_val);
    __ movq(res, tmp);
  }
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Move(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(array, FixedArray::kLengthOffset));
}


void LCodeGen::DoExternalArrayLength(LExternalArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ movl(result, FieldOperand(array, ExternalPixelArray::kLengthOffset));
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  ASSERT(input.is(result));
  NearLabel done;
  // If the object is a smi return the object.
  __ JumpIfSmi(input, &done);

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
  __ j(not_equal, &done);
  __ movq(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->Equals(instr->result()));
  __ not_(ToRegister(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToRegister(instr->InputAt(0)));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ addl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    __ addl(ToRegister(left), ToRegister(right));
  } else {
    __ addl(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->InputAt(0));
  XMMRegister right = ToDoubleRegister(instr->InputAt(1));
  XMMRegister result = ToDoubleRegister(instr->result());
  // All operations except MOD are computed in-place.
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
      __ subsd(left, right);
      break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      break;
    case Token::MOD:
      __ PrepareCallCFunction(2);
      __ movsd(xmm0, left);
      ASSERT(right.is(xmm1));
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
      __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
      __ movsd(result, xmm0);
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(rdx));
  ASSERT(ToRegister(instr->InputAt(1)).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}


void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    if (cc != always) {
      __ jmp(chunk_->GetAssemblyLabel(right_block));
    }
  }
}
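// Illustrative note (not part of the original source): EmitBranch avoids
// a jump to whichever target is the next emitted block. For example, if
// the true block immediately follows, only j(NegateCondition(cc)) to the
// false block is emitted and the true path falls through.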


void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->InputAt(0));
    __ testl(reg, reg);
    EmitBranch(true_block, false_block, not_zero);
  } else if (r.IsDouble()) {
    XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
    __ xorpd(xmm0, xmm0);
    __ ucomisd(reg, xmm0);
    EmitBranch(true_block, false_block, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->InputAt(0));
    HType type = instr->hydrogen()->type();
    if (type.IsBoolean()) {
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(true_block, false_block, equal);
    } else if (type.IsSmi()) {
      __ SmiCompare(reg, Smi::FromInt(0));
      EmitBranch(true_block, false_block, not_equal);
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
      __ j(equal, false_label);
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      __ j(equal, true_label);
      __ CompareRoot(reg, Heap::kFalseValueRootIndex);
      __ j(equal, false_label);
      __ Cmp(reg, Smi::FromInt(0));
      __ j(equal, false_label);
      __ JumpIfSmi(reg, true_label);

      // Test for double values. Plus/minus zero and NaN are false.
      NearLabel call_stub;
      __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
                     Heap::kHeapNumberMapRootIndex);
      __ j(not_equal, &call_stub);

      // HeapNumber => false iff +0, -0, or NaN. These three cases set the
      // zero flag when compared to zero using ucomisd.
      __ xorpd(xmm0, xmm0);
      __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
      __ j(zero, false_label);
      __ jmp(true_label);

      // The conversion stub doesn't cause garbage collections so it's
      // safe to not record a safepoint after the call.
      __ bind(&call_stub);
      ToBooleanStub stub;
      __ Pushad();
      __ push(reg);
      __ CallStub(&stub);
      __ testq(rax, rax);
      __ Popad();
      EmitBranch(true_block, false_block, not_zero);
    }
  }
}


void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    // Perform stack overflow check if this goto needs it before jumping.
    if (deferred_stack_check != NULL) {
      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
      __ j(above_equal, chunk_->GetAssemblyLabel(block));
      __ jmp(deferred_stack_check->entry());
      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
    } else {
      __ jmp(chunk_->GetAssemblyLabel(block));
    }
  }
}


void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
  PushSafepointRegistersScope scope(this);
  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
}


void LCodeGen::DoGoto(LGoto* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
   private:
    LGoto* instr_;
  };

  DeferredStackCheck* deferred = NULL;
  if (instr->include_stack_check()) {
    deferred = new DeferredStackCheck(this, instr);
  }
  EmitGoto(instr->block_id(), deferred);
}


inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
  if (right->IsConstantOperand()) {
    int32_t value = ToInteger32(LConstantOperand::cast(right));
    if (left->IsRegister()) {
      __ cmpl(ToRegister(left), Immediate(value));
    } else {
      __ cmpl(ToOperand(left), Immediate(value));
    }
  } else if (right->IsRegister()) {
    __ cmpl(ToRegister(left), ToRegister(right));
  } else {
    __ cmpl(ToRegister(left), ToOperand(right));
  }
}


void LCodeGen::DoCmpID(LCmpID* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();

  NearLabel unordered;
  if (instr->is_double()) {
    // Don't base result on EFLAGS when a NaN is involved. Instead
    // jump to the unordered case, which produces a false value.
    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
    __ j(parity_even, &unordered);
  } else {
    EmitCmpI(left, right);
  }

  NearLabel done;
  Condition cc = TokenToCondition(instr->op(), instr->is_double());
  __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
  __ j(cc, &done);

  __ bind(&unordered);
  __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  if (instr->is_double()) {
    // Don't base result on EFLAGS when a NaN is involved. Instead
    // jump to the false block.
    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
  } else {
    EmitCmpI(left, right);
  }

  Condition cc = TokenToCondition(instr->op(), instr->is_double());
  EmitBranch(true_block, false_block, cc);
}


void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  Register result = ToRegister(instr->result());

  NearLabel different, done;
  __ cmpq(left, right);
  __ j(not_equal, &different);
  __ LoadRoot(result, Heap::kTrueValueRootIndex);
  __ jmp(&done);
  __ bind(&different);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  __ cmpq(left, right);
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoIsNull(LIsNull* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  // If the expression is known to be a smi, then it's
  // definitely not null. Materialize false.
  // Consider adding other type and representation tests too.
  if (instr->hydrogen()->value()->type().IsSmi()) {
    __ LoadRoot(result, Heap::kFalseValueRootIndex);
    return;
  }

  __ CompareRoot(reg, Heap::kNullValueRootIndex);
  if (instr->is_strict()) {
    ASSERT(Heap::kTrueValueRootIndex >= 0);
    __ movl(result, Immediate(Heap::kTrueValueRootIndex));
    NearLabel load;
    __ j(equal, &load);
    __ Set(result, Heap::kFalseValueRootIndex);
    __ bind(&load);
    __ LoadRootIndexed(result, result, 0);
  } else {
    NearLabel true_value, false_value, done;
    __ j(equal, &true_value);
    __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
    __ j(equal, &true_value);
    __ JumpIfSmi(reg, &false_value);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = result;
    __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    __ j(not_zero, &true_value);
    __ bind(&false_value);
    __ LoadRoot(result, Heap::kFalseValueRootIndex);
    __ jmp(&done);
    __ bind(&true_value);
    __ LoadRoot(result, Heap::kTrueValueRootIndex);
    __ bind(&done);
  }
}


void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));

  int false_block = chunk_->LookupDestination(instr->false_block_id());

  if (instr->hydrogen()->representation().IsSpecialization() ||
      instr->hydrogen()->type().IsSmi()) {
    // If the expression is known to be untagged or a smi, then it's
    // definitely not null, and it can't be an undetectable object.
    // Jump directly to the false block.
    EmitGoto(false_block);
    return;
  }

  int true_block = chunk_->LookupDestination(instr->true_block_id());

  __ CompareRoot(reg, Heap::kNullValueRootIndex);
  if (instr->is_strict()) {
    EmitBranch(true_block, false_block, equal);
  } else {
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ j(equal, true_label);
    __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label);
    __ JumpIfSmi(reg, false_label);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = ToRegister(instr->TempAt(0));
    __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    EmitBranch(true_block, false_block, not_zero);
  }
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Label* is_not_object,
                                 Label* is_object) {
  ASSERT(!input.is(kScratchRegister));

  __ JumpIfSmi(input, is_not_object);

  __ CompareRoot(input, Heap::kNullValueRootIndex);
  __ j(equal, is_object);

  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  __ j(not_zero, is_not_object);

  __ movzxbl(kScratchRegister,
             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
  __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
  __ j(below, is_not_object);
  __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
  return below_equal;
}


void LCodeGen::DoIsObject(LIsObject* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Label is_false, is_true, done;

  Condition true_cond = EmitIsObject(reg, &is_false, &is_true);
  __ j(true_cond, &is_true);

  __ bind(&is_false);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ jmp(&done);

  __ bind(&is_true);
  __ LoadRoot(result, Heap::kTrueValueRootIndex);

  __ bind(&done);
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond = EmitIsObject(reg, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond);
}


void LCodeGen::DoIsSmi(LIsSmi* instr) {
  LOperand* input_operand = instr->InputAt(0);
  Register result = ToRegister(instr->result());
  if (input_operand->IsRegister()) {
    Register input = ToRegister(input_operand);
    __ CheckSmiToIndicator(result, input);
  } else {
    Operand input = ToOperand(instr->InputAt(0));
    __ CheckSmiToIndicator(result, input);
  }
  // result is zero if input is a smi, and one otherwise.
  ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
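  // The false root directly follows the true root (asserted above), so the
  // indicator can be used as an index: roots[kTrueValueRootIndex + 0] is the
  // true value and roots[kTrueValueRootIndex + 1] is the false value.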
  __ LoadRootIndexed(result, result, Heap::kTrueValueRootIndex);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Condition is_smi;
  if (instr->InputAt(0)->IsRegister()) {
    Register input = ToRegister(instr->InputAt(0));
    is_smi = masm()->CheckSmi(input);
  } else {
    Operand input = ToOperand(instr->InputAt(0));
    is_smi = masm()->CheckSmi(input);
  }
  EmitBranch(true_block, false_block, is_smi);
}

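// HHasInstanceType represents an inclusive range [from, to] of instance
// types. The ASSERT in TestType guarantees the range is either a single type
// or is bounded by FIRST_TYPE or LAST_TYPE, so the check reduces to a single
// unsigned comparison: TestType picks the interesting boundary type and
// BranchCondition the matching condition.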
static InstanceType TestType(HHasInstanceType* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceType* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ testl(input, Immediate(kSmiTagMask));
  NearLabel done, is_false;
  __ j(zero, &is_false);
  __ CmpObjectType(input, TestType(instr->hydrogen()), result);
  __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
  __ LoadRoot(result, Heap::kTrueValueRootIndex);
  __ jmp(&done);
  __ bind(&is_false);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ JumpIfSmi(input, false_label);

  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    __ AbortIfNotString(input);
  }

  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
  ASSERT(String::kHashShift >= kSmiTagSize);
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ LoadRoot(result, Heap::kTrueValueRootIndex);
  __ testl(FieldOperand(input, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
  NearLabel done;
  __ j(zero, &done);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ testl(FieldOperand(input, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, equal);
}


// Branches to a label or falls through with the answer in the z flag.
// Trashes the temp register and possibly input (if it and temp are aliased).
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp) {
  __ JumpIfSmi(input, is_false);
  __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
  __ j(below, is_false);

  // Map is now in temp.
  // Functions have class 'Function'.
  __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
  if (class_name->IsEqualTo(CStrVector("Function"))) {
    __ j(equal, is_true);
  } else {
    __ j(equal, is_false);
  }

  // Check if the constructor in the map is a function.
  __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));

  // As long as JS_FUNCTION_TYPE is the last instance type and it is
  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
  // LAST_JS_OBJECT_TYPE.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);

  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ movq(temp, FieldOperand(temp,
                             SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is a symbol because it's a literal.
  // The name in the constructor is a symbol because of the way the context is
  // booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are symbols it is sufficient to use an identity
  // comparison.
  ASSERT(class_name->IsSymbol());
  __ Cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  ASSERT(input.is(result));
  Register temp = ToRegister(instr->TempAt(0));
  Handle<String> class_name = instr->hydrogen()->class_name();
  NearLabel done;
  Label is_true, is_false;

  EmitClassOfTest(&is_true, &is_false, class_name, input, temp);

  __ j(not_equal, &is_false);

  __ bind(&is_true);
  __ LoadRoot(result, Heap::kTrueValueRootIndex);
  __ jmp(&done);

  __ bind(&is_false);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp);

  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  InstanceofStub stub(InstanceofStub::kNoFlags);
  __ push(ToRegister(instr->InputAt(0)));
  __ push(ToRegister(instr->InputAt(1)));
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  NearLabel true_value, done;
  __ testq(rax, rax);
  __ j(zero, &true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  InstanceofStub stub(InstanceofStub::kNoFlags);
  __ push(ToRegister(instr->InputAt(0)));
  __ push(ToRegister(instr->InputAt(1)));
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ testq(rax, rax);
  EmitBranch(true_block, false_block, zero);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
    }

    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred =
      new DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->InputAt(0));

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  NearLabel cache_miss;
  // Use a temp register to avoid memory operands with variable lengths.
  Register map = ToRegister(instr->TempAt(0));
  __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  __ movq(kScratchRegister, factory()->the_hole_value(),
          RelocInfo::EMBEDDED_OBJECT);
  __ cmpq(map, kScratchRegister);  // Patched to cached map.
  __ j(not_equal, &cache_miss);
  // Patched to load either true or false.
  __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
#ifdef DEBUG
  // Check that the code size between patch label and patch sites is invariant.
  Label end_of_patched_code;
  __ bind(&end_of_patched_code);
  ASSERT(true);
#endif
  __ jmp(&done);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);  // Null is not an instance of anything.
  __ CompareRoot(object, Heap::kNullValueRootIndex);
  __ j(equal, &false_result);

  // String values are not instances of anything.
  __ JumpIfNotString(object, kScratchRegister, deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);

  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                                Label* map_check) {
  {
    PushSafepointRegistersScope scope(this);
    InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
        InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
    InstanceofStub stub(flags);

    __ push(ToRegister(instr->InputAt(0)));
    __ Push(instr->function());

    Register temp = ToRegister(instr->TempAt(0));
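    // The delta pushed below tells the stub the distance from the inlined
    // map check to the return address of the stub call so that it can find
    // and patch the inlined map and result sites. kAdditionalDelta is
    // presumably the size of the push_imm32 and call instructions below.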
    static const int kAdditionalDelta = 10;
    int delta =
        masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
    ASSERT(delta >= 0);
    __ push_imm32(delta);

    // We are pushing three values on the stack but recording a
    // safepoint with two arguments because the stub is going to
    // remove the third argument from the stack before jumping
    // to the instanceof builtin on the slow path.
    CallCodeGeneric(stub.GetCode(),
                    RelocInfo::CODE_TARGET,
                    instr,
                    RECORD_SAFEPOINT_WITH_REGISTERS,
                    2);
    ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
    // Move result to a register that survives the end of the
    // PushSafepointRegisterScope.
    __ movq(kScratchRegister, rax);
  }
  __ testq(kScratchRegister, kScratchRegister);
  Label load_false;
  Label done;
  __ j(not_zero, &load_false);
  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
  __ jmp(&done);
  __ bind(&load_false);
  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

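  // The compare stub expects the condition and the input operands reversed
  // for GT and LTE (see also DoCmpTAndBranch below).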
  Condition condition = TokenToCondition(op, false);
  if (op == Token::GT || op == Token::LTE) {
    condition = ReverseCondition(condition);
  }
  NearLabel true_value, done;
  __ testq(rax, rax);
  __ j(condition, &true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  // The compare stub expects the condition and the input operands
  // reversed for GT and LTE.
  Condition condition = TokenToCondition(op, false);
  if (op == Token::GT || op == Token::LTE) {
    condition = ReverseCondition(condition);
  }
  __ testq(rax, rax);
  EmitBranch(true_block, false_block, condition);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Preserve the return value on the stack and rely on the runtime
    // call to return the value in the same register.
    __ push(rax);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  __ movq(rsp, rbp);
  __ pop(rbp);
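  // Drop the parameters and the receiver (hence the + 1) from the caller's
  // stack; rcx serves as a scratch register for Ret.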
  __ Ret((ParameterCount() + 1) * kPointerSize, rcx);
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  if (result.is(rax)) {
    __ load_rax(instr->hydrogen()->cell().location(),
                RelocInfo::GLOBAL_PROPERTY_CELL);
  } else {
    __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
    __ movq(result, Operand(result, 0));
  }
  if (instr->hydrogen()->check_hole_value()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
                                               RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  ASSERT(!value.is(temp));
  bool check_hole = instr->hydrogen()->check_hole_value();
  if (!check_hole && value.is(rax)) {
    __ store_rax(instr->hydrogen()->cell().location(),
                 RelocInfo::GLOBAL_PROPERTY_CELL);
    return;
  }
  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
  if (check_hole) {
    __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
  }
  __ movq(Operand(temp, 0), value);
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(rdx));
  ASSERT(ToRegister(instr->value()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = instr->strict_mode()
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movq(result, ContextOperand(context, instr->slot_index()));
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  __ movq(ContextOperand(context, instr->slot_index()), value);
  if (instr->needs_write_barrier()) {
    int offset = Context::SlotOffset(instr->slot_index());
    Register scratch = ToRegister(instr->TempAt(0));
    __ RecordWrite(context, offset, value, scratch);
  }
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
  } else {
    __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
    __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
  }
}


void LCodeGen::EmitLoadField(Register result,
                             Register object,
                             Handle<Map> type,
                             Handle<String> name) {
  LookupResult lookup;
  type->LookupInDescriptors(NULL, *name, &lookup);
  ASSERT(lookup.IsProperty() && lookup.type() == FIELD);
  int index = lookup.GetLocalFieldIndexFromMap(*type);
  int offset = index * kPointerSize;
  if (index < 0) {
    // Negative property indices are in-object properties, indexed
    // from the end of the fixed part of the object.
    __ movq(result, FieldOperand(object, offset + type->instance_size()));
  } else {
    // Non-negative property indices are in the properties array.
    __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
    __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
  }
}


void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());

  int map_count = instr->hydrogen()->types()->length();
  Handle<String> name = instr->hydrogen()->name();

  if (map_count == 0) {
    ASSERT(instr->hydrogen()->need_generic());
    __ Move(rcx, instr->hydrogen()->name());
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    NearLabel done;
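    // Compare the receiver's map against each expected map in turn; on the
    // last map either fall back to the generic IC or deoptimize.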
    for (int i = 0; i < map_count - 1; ++i) {
      Handle<Map> map = instr->hydrogen()->types()->at(i);
      NearLabel next;
      __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
      __ j(not_equal, &next);
      EmitLoadField(result, object, map, name);
      __ jmp(&done);
      __ bind(&next);
    }
    Handle<Map> map = instr->hydrogen()->types()->last();
    __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
    if (instr->hydrogen()->need_generic()) {
      NearLabel generic;
      __ j(not_equal, &generic);
      EmitLoadField(result, object, map, name);
      __ jmp(&done);
      __ bind(&generic);
      __ Move(rcx, instr->hydrogen()->name());
      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
      CallCode(ic, RelocInfo::CODE_TARGET, instr);
    } else {
      DeoptimizeIf(not_equal, instr->environment());
      EmitLoadField(result, object, map, name);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  NearLabel non_instance;
  __ testb(FieldOperand(result, Map::kBitFieldOffset),
           Immediate(1 << Map::kHasNonInstancePrototype));
  __ j(not_zero, &non_instance);

  // Get the prototype or initial map from the function.
  __ movq(result,
          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  NearLabel done;
  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
  __ j(not_equal, &done);

  // Get the prototype from the initial map.
  __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ movq(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    NearLabel done;
    __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
                   Heap::kFixedArrayMapRootIndex);
    __ j(equal, &done);
    __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
                   Heap::kFixedCOWArrayMapRootIndex);
    __ j(equal, &done);
    Register temp((result.is(rax)) ? rbx : rax);
    __ push(temp);
    __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
    __ movzxbq(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ subq(temp, Immediate(FIRST_EXTERNAL_ARRAY_TYPE));
    __ cmpq(temp, Immediate(kExternalArrayTypeCount));
    __ pop(temp);
    __ Check(below, "Check for fast elements failed.");
    __ bind(&done);
  }
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(input,
                               ExternalPixelArray::kExternalPointerOffset));
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Register result = ToRegister(instr->result());

  if (instr->index()->IsRegister()) {
    __ subl(length, ToRegister(instr->index()));
  } else {
    __ subl(length, ToOperand(instr->index()));
  }
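  // The flags from subl give an unsigned comparison of length and index;
  // deoptimize unless index < length.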
  DeoptimizeIf(below_equal, instr->environment());

  // There are two words between the frame pointer and the last argument.
  // Subtracting the index from the length accounts for one of them; the
  // kPointerSize displacement below accounts for the other.
  __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
}


void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = ToRegister(instr->key());
  Register result = ToRegister(instr->result());
  ASSERT(result.is(elements));

  // Load the result.
  __ movq(result, FieldOperand(elements,
                               key,
                               times_pointer_size,
                               FixedArray::kHeaderSize));

  // Check for the hole value.
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(equal, instr->environment());
}


void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  Register external_pointer = ToRegister(instr->external_pointer());
  Register key = ToRegister(instr->key());
  ExternalArrayType array_type = instr->array_type();
  if (array_type == kExternalFloatArray) {
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ movss(result, Operand(external_pointer, key, times_4, 0));
    __ cvtss2sd(result, result);
  } else {
    Register result(ToRegister(instr->result()));
    switch (array_type) {
      case kExternalByteArray:
        __ movsxbq(result, Operand(external_pointer, key, times_1, 0));
        break;
      case kExternalUnsignedByteArray:
      case kExternalPixelArray:
        __ movzxbq(result, Operand(external_pointer, key, times_1, 0));
        break;
      case kExternalShortArray:
        __ movsxwq(result, Operand(external_pointer, key, times_2, 0));
        break;
      case kExternalUnsignedShortArray:
        __ movzxwq(result, Operand(external_pointer, key, times_2, 0));
        break;
      case kExternalIntArray:
        __ movsxlq(result, Operand(external_pointer, key, times_4, 0));
        break;
      case kExternalUnsignedIntArray:
        __ movl(result, Operand(external_pointer, key, times_4, 0));
        __ testl(result, result);
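        // The loaded value will be used as a signed 32-bit integer, so a
        // set sign bit means the value is not representable; deoptimize.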
        // TODO(danno): we could be more clever here, perhaps having a special
        // version of the stub that detects if the overflow case actually
        // happens, and generate code that returns a double rather than int.
        DeoptimizeIf(negative, instr->environment());
        break;
      case kExternalFloatArray:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->key()).is(rax));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  // Check for an arguments adaptor frame.
  NearLabel done, adapted;
  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adapted);

  // No arguments adaptor frame.
  __ movq(result, rbp);
  __ jmp(&done);

  // Arguments adaptor frame present.
  __ bind(&adapted);
  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Result is the frame pointer for the frame if not adapted and for the real
  // frame below the adaptor frame if adapted.
  __ bind(&done);
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register result = ToRegister(instr->result());

  NearLabel done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  if (instr->InputAt(0)->IsRegister()) {
    __ cmpq(rbp, ToRegister(instr->InputAt(0)));
  } else {
    __ cmpq(rbp, ToOperand(instr->InputAt(0)));
  }
  __ movl(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ SmiToInteger32(result,
                    Operand(result,
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  ASSERT(receiver.is(rax));  // Used for parameter count.
  ASSERT(function.is(rdi));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(rax));

  // If the receiver is null or undefined, we have to pass the global object
  // as a receiver.
  NearLabel global_object, receiver_ok;
  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
  __ j(equal, &global_object);
  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
  __ j(equal, &global_object);

  // The receiver should be a JS object.
  Condition is_smi = __ CheckSmi(receiver);
  DeoptimizeIf(is_smi, instr->environment());
  __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister);
  DeoptimizeIf(below, instr->environment());
  __ jmp(&receiver_ok);

  __ bind(&global_object);
  // TODO(kmillikin): We have a hydrogen value for the global object.  See
  // if it's better to use it than to explicitly fetch it from the context
  // here.
  __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
  __ bind(&receiver_ok);

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
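  // Deoptimize for unexpectedly large argument counts rather than copying
  // that many arguments onto the stack here.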
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmpq(length, Immediate(kArgumentsLimit));
  DeoptimizeIf(above, instr->environment());

  __ push(receiver);
  __ movq(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  NearLabel invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ testl(length, length);
  __ j(zero, &invoke);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ decl(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  LEnvironment* env = instr->deoptimization_environment();
  RecordPosition(pointers->position());
  RegisterEnvironmentForDeoptimization(env);
  SafepointGenerator safepoint_generator(this,
                                         pointers,
                                         env->deoptimization_index());
  v8::internal::ParameterCount actual(rax);
  __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->InputAt(0);
  if (argument->IsConstantOperand()) {
    EmitPushConstantOperand(argument);
  } else if (argument->IsRegister()) {
    __ push(ToRegister(argument));
  } else {
    ASSERT(!argument->IsDoubleRegister());
    __ push(ToOperand(argument));
  }
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movq(result,
          Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ movq(result, FieldOperand(result, JSFunction::kContextOffset));
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register result = ToRegister(instr->result());
  __ movq(result, GlobalObjectOperand());
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global());
  Register result = ToRegister(instr->result());
  __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr) {
  // Change context if needed.
  bool change_context =
      (info()->closure()->context() != function->context()) ||
      scope()->contains_with() ||
      (scope()->num_heap_slots() > 0);
  if (change_context) {
    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
  }

  // Set rax to the arguments count if adaptation is not needed. Assumes that
  // rax is available to write to at this point.
  if (!function->NeedsArgumentsAdaption()) {
    __ Set(rax, arity);
  }

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  // Invoke function.
  if (*function == *info()->closure()) {
    __ CallSelf();
  } else {
    __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
  }

  // Set up deoptimization.
  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);

  // Restore context.
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  __ Move(rdi, instr->function());
  CallKnownFunction(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  Register input_reg = ToRegister(instr->InputAt(0));
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());

  Label done;
  Register tmp = input_reg.is(rax) ? rcx : rax;
  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  Label negative;
  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ testl(tmp, Immediate(HeapNumber::kSignMask));
  __ j(not_zero, &negative);
  __ jmp(&done);

  __ bind(&negative);

  Label allocated, slow;
  __ AllocateHeapNumber(tmp, tmp2, &slow);
  __ jmp(&allocated);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(rax)) {
    __ movq(tmp, rax);
  }

  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
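  // Compute the absolute value by clearing the sign bit of the raw
  // IEEE-754 representation: shift it out to the left and back in.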
  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ shl(tmp2, Immediate(1));
  __ shr(tmp2, Immediate(1));
  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  Register input_reg = ToRegister(instr->InputAt(0));
  __ testl(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive);
  __ negl(input_reg);  // Sets flags.
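  // The only input whose negation is still negative is kMinInt; its
  // absolute value is not representable as an int32, so deoptimize.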
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}


void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
   private:
    LUnaryMathOperation* instr_;
  };

  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    XMMRegister scratch = xmm0;
    XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
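    // scratch = 0.0 - input = -input. The bit patterns of input and -input
    // differ only in the sign bit, so ANDing them clears the sign bit and
    // yields the absolute value.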
    __ xorpd(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ andpd(input_reg, scratch);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->InputAt(0));
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiToInteger32(input_reg, input_reg);
    EmitIntegerMathAbs(instr);
    __ Integer32ToSmi(input_reg, input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
  __ ucomisd(input_reg, xmm_scratch);

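  // After ucomisd, 'below' holds for negative inputs and for NaN, while
  // 'equal' holds for zero inputs (-0 compares equal to +0). Deoptimizing
  // on below_equal therefore catches -0, at the cost of also bailing out
  // for an input of exactly +0.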
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(below_equal, instr->environment());
  } else {
    DeoptimizeIf(below, instr->environment());
  }

  // Use truncating instruction (OK because input is positive).
  __ cvttsd2si(output_reg, input_reg);

  // Overflow is signalled with minint.
  __ cmpl(output_reg, Immediate(0x80000000));
  DeoptimizeIf(equal, instr->environment());
}


void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  const XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));

  // xmm_scratch = 0.5
  __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
  __ movq(xmm_scratch, kScratchRegister);

  // input = input + 0.5
  __ addsd(input_reg, xmm_scratch);

  // We need to return -0 for the input range [-0.5, 0[, otherwise
  // compute Math.floor(value + 0.5).
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(below_equal, instr->environment());
  } else {
    // If we don't need to bail out on -0, we only need to bail out
    // on negative inputs.
    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(below, instr->environment());
  }

  // Compute Math.floor(value + 0.5).
  // Use truncating instruction (OK because input is positive).
  __ cvttsd2si(output_reg, input_reg);

  // Overflow is signalled with minint.
  __ cmpl(output_reg, Immediate(0x80000000));
  DeoptimizeIf(equal, instr->environment());
}


void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
  __ sqrtsd(input_reg, input_reg);
}


void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
  XMMRegister xmm_scratch = xmm0;
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
  __ xorpd(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
}


void LCodeGen::DoPower(LPower* instr) {
  LOperand* left = instr->InputAt(0);
  XMMRegister left_reg = ToDoubleRegister(left);
  ASSERT(!left_reg.is(xmm1));
  LOperand* right = instr->InputAt(1);
  XMMRegister result_reg = ToDoubleRegister(instr->result());
  Representation exponent_type = instr->hydrogen()->right()->representation();
  if (exponent_type.IsDouble()) {
    __ PrepareCallCFunction(2);
    // Move arguments to correct registers.
    __ movsd(xmm0, left_reg);
    ASSERT(ToDoubleRegister(right).is(xmm1));
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()), 2);
  } else if (exponent_type.IsInteger32()) {
    __ PrepareCallCFunction(2);
    // Move arguments to correct registers: xmm0 and edi (not rdi).
    // On Windows, the registers are xmm0 and edx.
    __ movsd(xmm0, left_reg);
#ifdef _WIN64
    ASSERT(ToRegister(right).is(rdx));
#else
    ASSERT(ToRegister(right).is(rdi));
#endif
    __ CallCFunction(
        ExternalReference::power_double_int_function(isolate()), 2);
  } else {
    ASSERT(exponent_type.IsTagged());
    Register right_reg = ToRegister(right);

    Label non_smi, call;
    __ JumpIfNotSmi(right_reg, &non_smi);
    __ SmiToInteger32(right_reg, right_reg);
    __ cvtlsi2sd(xmm1, right_reg);
    __ jmp(&call);

    __ bind(&non_smi);
    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, kScratchRegister);
    DeoptimizeIf(not_equal, instr->environment());
    __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));

    __ bind(&call);
    __ PrepareCallCFunction(2);
    // Move arguments to correct registers xmm0 and xmm1.
    __ movsd(xmm0, left_reg);
    // Right argument is already in xmm1.
    __ CallCFunction(
        ExternalReference::power_double_double_function(isolate()), 2);
  }
  // Return value is in xmm0.
  __ movsd(result_reg, xmm0);
  // Restore context register.
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
  switch (instr->op()) {
    case kMathAbs:
      DoMathAbs(instr);
      break;
    case kMathFloor:
      DoMathFloor(instr);
      break;
    case kMathRound:
      DoMathRound(instr);
      break;
    case kMathSqrt:
      DoMathSqrt(instr);
      break;
    case kMathPowHalf:
      DoMathPowHalf(instr);
      break;
    case kMathCos:
      DoMathCos(instr);
      break;
    case kMathSin:
      DoMathSin(instr);
      break;
    case kMathLog:
      DoMathLog(instr);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->key()).is(rcx));
  ASSERT(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
      arity, NOT_IN_LOOP);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
      arity, NOT_IN_LOOP);
  __ Move(rcx, instr->name());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ Drop(1);
}


void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  int arity = instr->arity();
  Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
      arity, NOT_IN_LOOP);
  __ Move(rcx, instr->name());
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  __ Move(rdi, instr->target());
  CallKnownFunction(instr->target(), instr->arity(), instr);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
  __ Set(rax, instr->arity());
  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register value = ToRegister(instr->value());
  int offset = instr->offset();

  if (!instr->transition().is_null()) {
    __ Move(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
  }

  // Do the store.
  if (instr->is_in_object()) {
    __ movq(FieldOperand(object, offset), value);
    if (instr->needs_write_barrier()) {
      Register temp = ToRegister(instr->TempAt(0));
      // Update the write barrier for the object for in-object properties.
      __ RecordWrite(object, offset, value, temp);
    }
  } else {
    Register temp = ToRegister(instr->TempAt(0));
    __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
    __ movq(FieldOperand(temp, offset), value);
    if (instr->needs_write_barrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWrite(temp, offset, value, object);
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->value()).is(rax));

  __ Move(rcx, instr->hydrogen()->name());
  Handle<Code> ic = instr->strict_mode()
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreKeyedSpecializedArrayElement(
    LStoreKeyedSpecializedArrayElement* instr) {
  Register external_pointer = ToRegister(instr->external_pointer());
  Register key = ToRegister(instr->key());
  ExternalArrayType array_type = instr->array_type();
  if (array_type == kExternalFloatArray) {
    XMMRegister value(ToDoubleRegister(instr->value()));
    __ cvtsd2ss(value, value);
    __ movss(Operand(external_pointer, key, times_4, 0), value);
  } else {
    Register value(ToRegister(instr->value()));
    switch (array_type) {
      case kExternalPixelArray:
        {  // Clamp the value to [0..255].
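          // Branchless clamp: if any bit outside the low byte is set, the
          // value is out of range; setcc/decb then turn negative values
          // into 0x00 and values above 255 into 0xFF.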
3012          NearLabel done;
3013          __ testl(value, Immediate(0xFFFFFF00));
3014          __ j(zero, &done);
3015          __ setcc(negative, value);  // 1 if negative, 0 if positive.
3016          __ decb(value);  // 0 if negative, 255 if positive.
3017          __ bind(&done);
3018          __ movb(Operand(external_pointer, key, times_1, 0), value);
3019        }
3020        break;
3021      case kExternalByteArray:
3022      case kExternalUnsignedByteArray:
3023        __ movb(Operand(external_pointer, key, times_1, 0), value);
3024        break;
3025      case kExternalShortArray:
3026      case kExternalUnsignedShortArray:
3027        __ movw(Operand(external_pointer, key, times_2, 0), value);
3028        break;
3029      case kExternalIntArray:
3030      case kExternalUnsignedIntArray:
3031        __ movl(Operand(external_pointer, key, times_4, 0), value);
3032        break;
3033      case kExternalFloatArray:
3034        UNREACHABLE();
3035        break;
3036    }
3037  }
3038}
3039
3040
3041void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3042  if (instr->length()->IsRegister()) {
3043    __ cmpq(ToRegister(instr->index()), ToRegister(instr->length()));
3044  } else {
3045    __ cmpq(ToRegister(instr->index()), ToOperand(instr->length()));
3046  }
3047  DeoptimizeIf(above_equal, instr->environment());
3048}
3049
3050
3051void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3052  Register value = ToRegister(instr->value());
3053  Register elements = ToRegister(instr->object());
3054  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3055
3056  // Do the store.
3057  if (instr->key()->IsConstantOperand()) {
3058    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3059    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3060    int offset =
3061        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
3062    __ movq(FieldOperand(elements, offset), value);
3063  } else {
3064    __ movq(FieldOperand(elements,
3065                         key,
3066                         times_pointer_size,
3067                         FixedArray::kHeaderSize),
3068            value);
3069  }
3070
3071  if (instr->hydrogen()->NeedsWriteBarrier()) {
3072    // Compute address of modified element and store it into key register.
3073    __ lea(key, FieldOperand(elements,
3074                             key,
3075                             times_pointer_size,
3076                             FixedArray::kHeaderSize));
3077    __ RecordWrite(elements, key, value);
3078  }
3079}
3080
3081
3082void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3083  ASSERT(ToRegister(instr->object()).is(rdx));
3084  ASSERT(ToRegister(instr->key()).is(rcx));
3085  ASSERT(ToRegister(instr->value()).is(rax));
3086
3087  Handle<Code> ic = instr->strict_mode()
3088      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3089      : isolate()->builtins()->KeyedStoreIC_Initialize();
3090  CallCode(ic, RelocInfo::CODE_TARGET, instr);
3091}
3092
3093
3094void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3095  class DeferredStringCharCodeAt: public LDeferredCode {
3096   public:
3097    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3098        : LDeferredCode(codegen), instr_(instr) { }
3099    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3100   private:
3101    LStringCharCodeAt* instr_;
3102  };
3103
3104  Register string = ToRegister(instr->string());
3105  Register index = no_reg;
3106  int const_index = -1;
3107  if (instr->index()->IsConstantOperand()) {
3108    const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3109    STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
3110    if (!Smi::IsValid(const_index)) {
3111      // Guaranteed to be out of bounds because of the assert above.
3112      // So the bounds check that must dominate this instruction must
3113      // have deoptimized already.
3114      if (FLAG_debug_code) {
3115        __ Abort("StringCharCodeAt: out of bounds index.");
3116      }
3117      // No code needs to be generated.
3118      return;
3119    }
3120  } else {
3121    index = ToRegister(instr->index());
3122  }
3123  Register result = ToRegister(instr->result());
3124
3125  DeferredStringCharCodeAt* deferred =
3126      new DeferredStringCharCodeAt(this, instr);
3127
  NearLabel flat_string, ascii_string, done;

  // Fetch the instance type of the receiver into result register.
  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for non-sequential strings.
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(zero, &flat_string);

  // Handle cons strings and go to deferred code for the rest.
  __ testb(result, Immediate(kIsConsStringMask));
  __ j(zero, deferred->entry());

  // ConsString.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 Heap::kEmptyStringRootIndex);
  __ j(not_equal, deferred->entry());
  // Get the first of the two strings and load its instance type.
  __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
  // If the first cons component is also non-flat, then go to runtime.
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(not_zero, deferred->entry());

  // Check for ASCII or two-byte string.
  __ bind(&flat_string);
  STATIC_ASSERT(kAsciiStringTag != 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii_string);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  if (instr->index()->IsConstantOperand()) {
    __ movzxwl(result,
               FieldOperand(string,
                            SeqTwoByteString::kHeaderSize +
                            (kUC16Size * const_index)));
  } else {
    __ movzxwl(result, FieldOperand(string,
                                    index,
                                    times_2,
                                    SeqTwoByteString::kHeaderSize));
  }
  __ jmp(&done);

  // ASCII string.
  // Load the byte into the result register.
  __ bind(&ascii_string);
  if (instr->index()->IsConstantOperand()) {
    __ movzxbl(result, FieldOperand(string,
                                    SeqAsciiString::kHeaderSize + const_index));
  } else {
    __ movzxbl(result, FieldOperand(string,
                                    index,
                                    times_1,
                                    SeqAsciiString::kHeaderSize));
  }
  __ bind(&done);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Push(Smi::FromInt(const_index));
  } else {
    Register index = ToRegister(instr->index());
    __ Integer32ToSmi(index, index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
  if (FLAG_debug_code) {
    __ AbortIfNotSmi(rax);
  }
  __ SmiToInteger32(rax, rax);
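  // Write the untagged result into the stack slot where the result register
  // was spilled by PushSafepointRegistersScope, so that the value survives
  // the register restore at the end of the scope.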
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

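  // Fast path: single-character ASCII strings are cached in a fixed array
  // in the root list, indexed by character code. An undefined entry means
  // the string has not been materialized yet, so fall through to the
  // runtime via the deferred code.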
  __ cmpl(char_code, Immediate(String::kMaxAsciiCharCode));
  __ j(above, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ movq(result, FieldOperand(result,
                               char_code, times_pointer_size,
                               FixedArray::kHeaderSize));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Integer32ToSmi(char_code, char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoStringLength(LStringLength* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  __ movq(result, FieldOperand(string, String::kLengthOffset));
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  if (input->IsRegister()) {
    __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
  } else {
    __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
  }
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

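  // On x64 a smi stores its 32-bit payload in the upper half of the word
  // (kSmiShift == 32), so tagging is a single shift that can never
  // overflow. That is why, unlike on 32-bit ports, no overflow deopt is
  // needed here.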
  __ Integer32ToSmi(reg, reg);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->TempAt(0));

  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
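  // Both the inline allocation and the deferred runtime call arrive here
  // with reg holding a freshly allocated, still uninitialized heap number;
  // only then is the double payload stored into it.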
  __ bind(deferred->exit());
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
    // Ensure that value in rax survives popping registers.
    __ movq(kScratchRegister, rax);
  }
  __ movq(reg, kScratchRegister);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Register input = ToRegister(instr->InputAt(0));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ Integer32ToSmi(input, input);
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Register input = ToRegister(instr->InputAt(0));
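  // If the input is not statically known to be a smi, test the tag bit and
  // deoptimize on failure; untagging is then just a right shift by
  // kSmiShift.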
  if (instr->needs_check()) {
    Condition is_smi = __ CheckSmi(input);
    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
  }
  __ SmiToInteger32(input, input);
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                bool deoptimize_on_undefined,
                                LEnvironment* env) {
  NearLabel load_smi, done;

  // Smi check.
  __ JumpIfSmi(input_reg, &load_smi);

  // Heap number map check.
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  if (deoptimize_on_undefined) {
    DeoptimizeIf(not_equal, env);
  } else {
    NearLabel heap_number;
    __ j(equal, &heap_number);
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(not_equal, env);

    // Convert undefined to NaN. Compute NaN as 0/0.
    __ xorpd(result_reg, result_reg);
    __ divsd(result_reg, result_reg);
    __ jmp(&done);

    __ bind(&heap_number);
  }
  // Heap number to XMM conversion.
  __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ jmp(&done);

  // Smi to XMM conversion
  __ bind(&load_smi);
  __ SmiToInteger32(kScratchRegister, input_reg);
  __ cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}


class DeferredTaggedToI: public LDeferredCode {
 public:
  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
      : LDeferredCode(codegen), instr_(instr) { }
  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
 private:
  LTaggedToI* instr_;
};


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  NearLabel done, heap_number;
  Register input_reg = ToRegister(instr->InputAt(0));

  // Heap number map check.
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);

  if (instr->truncating()) {
    __ j(equal, &heap_number);
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(not_equal, instr->environment());
    __ Set(input_reg, 0);
    __ jmp(&done);

    __ bind(&heap_number);

    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
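    // cvttsd2siq does not trap: for NaN or a value outside the int64 range
    // it produces the "integer indefinite" value 0x8000000000000000, so
    // that sentinel is compared against below to trigger a deopt.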
    __ cvttsd2siq(input_reg, xmm0);
    __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
    __ cmpq(input_reg, kScratchRegister);
    DeoptimizeIf(equal, instr->environment());
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(not_equal, instr->environment());

    XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2si(input_reg, xmm0);
    __ cvtlsi2sd(xmm_temp, input_reg);
    __ ucomisd(xmm0, xmm_temp);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
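    // An integer result of 0 may have come from -0.0. movmskpd copies the
    // double's sign bit into bit 0 of the destination, so a set bit means
    // the input was negative zero and we must deoptimize.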
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ testl(input_reg, input_reg);
      __ j(not_zero, &done);
      __ movmskpd(input_reg, xmm0);
      __ andl(input_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);
  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
  __ JumpIfNotSmi(input_reg, deferred->entry());
  __ SmiToInteger32(input_reg, input_reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->deoptimize_on_undefined(),
                   instr->environment());
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    __ cvttsd2siq(result_reg, input_reg);
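    // As in DoDeferredTaggedToI, 0x8000000000000000 is the sentinel that
    // cvttsd2siq produces for NaN or out-of-range inputs.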
    __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
    __ cmpq(result_reg, kScratchRegister);
    DeoptimizeIf(equal, instr->environment());
  } else {
    __ cvttsd2si(result_reg, input_reg);
    __ cvtlsi2sd(xmm0, result_reg);
    __ ucomisd(xmm0, input_reg);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      NearLabel done;
      // The integer converted back is equal to the original. We
      // only have to test if we got -0 as an input.
      __ testl(result_reg, result_reg);
      __ j(not_zero, &done);
      __ movmskpd(result_reg, input_reg);
      // Bit 0 contains the sign of the double in input_reg.
      // If input was positive, we are ok and return 0, otherwise
      // deoptimize.
      __ andl(result_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ bind(&done);
    }
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(NegateCondition(cc), instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->InputAt(0);
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(cc, instr->environment());
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  InstanceType first = instr->hydrogen()->first();
  InstanceType last = instr->hydrogen()->last();

  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));

  // If there is only one type in the interval, check for equality.
  if (first == last) {
    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
            Immediate(static_cast<int8_t>(first)));
    DeoptimizeIf(not_equal, instr->environment());
  } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
    // String has a dedicated bit in instance type.
    __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
             Immediate(kIsNotStringMask));
    DeoptimizeIf(not_zero, instr->environment());
  } else {
    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
            Immediate(static_cast<int8_t>(first)));
    DeoptimizeIf(below, instr->environment());
    // Omit check for the last type.
    if (last != LAST_TYPE) {
      __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
              Immediate(static_cast<int8_t>(last)));
      DeoptimizeIf(above, instr->environment());
    }
  }
}


void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  ASSERT(instr->InputAt(0)->IsRegister());
  Register reg = ToRegister(instr->InputAt(0));
  __ Cmp(reg, instr->hydrogen()->target());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoCheckMap(LCheckMap* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
         instr->hydrogen()->map());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
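  // Objects in new space can move during a scavenge, so their addresses
  // must not be embedded in code directly; they are instead referenced
  // indirectly through a global property cell that the GC keeps up to date.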
  if (heap()->InNewSpace(*object)) {
    Handle<JSGlobalPropertyCell> cell =
        factory()->NewJSGlobalPropertyCell(object);
    __ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
    __ movq(result, Operand(result, 0));
  } else {
    __ Move(result, object);
  }
}


void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register reg = ToRegister(instr->TempAt(0));

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  LoadHeapObject(reg, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
           Handle<Map>(current_prototype->map()));
    DeoptimizeIf(not_equal, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    LoadHeapObject(reg, current_prototype);
  }

  // Check the holder map.
  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
         Handle<Map>(current_prototype->map()));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  // Set up the parameters to the stub/runtime call.
  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->constant_elements());

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
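  // Copy-on-write literals share the boilerplate's elements until the first
  // write; deeply nested (depth > 1) or oversized literals fall back to the
  // full runtime path instead of the shallow-clone stub.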
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  // Set up the parameters to the stub/runtime call.
  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->constant_properties());
  __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));

  // Pick the right runtime function to call.
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  }
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(rax));
  __ push(rax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  NearLabel materialized;
  // Registers will be used as follows:
  // rdi = JS function.
  // rcx = literals array.
  // rbx = regexp literal.
  // rax = regexp literal clone.
  __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
  __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ movq(rbx, FieldOperand(rcx, literal_offset));
  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &materialized);

  // Create regexp literal using runtime function
  // Result will be in rax.
  __ push(rcx);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->pattern());
  __ Push(instr->hydrogen()->flags());
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ movq(rbx, rax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(rbx);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(rbx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ movq(rdx, FieldOperand(rbx, i));
    __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
    __ movq(FieldOperand(rax, i), rdx);
    __ movq(FieldOperand(rax, i + kPointerSize), rcx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
    __ movq(FieldOperand(rax, size - kPointerSize), rdx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(
        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
    __ Push(shared_info);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(rsi);
    __ Push(shared_info);
    __ PushRoot(pretenure ?
                Heap::kTrueValueRootIndex :
                Heap::kFalseValueRootIndex);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->InputAt(0);
  if (input->IsConstantOperand()) {
    __ Push(ToHandle(LConstantOperand::cast(input)));
  } else if (input->IsRegister()) {
    __ push(ToRegister(input));
  } else {
    ASSERT(input->IsStackSlot());
    __ push(ToOperand(input));
  }
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Label true_label;
  Label false_label;
  NearLabel done;

  Condition final_branch_condition = EmitTypeofIs(&true_label,
                                                  &false_label,
                                                  input,
                                                  instr->type_literal());
  __ j(final_branch_condition, &true_label);
  __ bind(&false_label);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ jmp(&done);

  __ bind(&true_label);
  __ LoadRoot(result, Heap::kTrueValueRootIndex);

  __ bind(&done);
}


void LCodeGen::EmitPushConstantOperand(LOperand* operand) {
  ASSERT(operand->IsConstantOperand());
  LConstantOperand* const_op = LConstantOperand::cast(operand);
  Handle<Object> literal = chunk_->LookupLiteral(const_op);
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
  if (r.IsInteger32()) {
    ASSERT(literal->IsNumber());
    __ push(Immediate(static_cast<int32_t>(literal->Number())));
  } else if (r.IsDouble()) {
    Abort("unsupported double immediate");
  } else {
    ASSERT(r.IsTagged());
    __ Push(literal);
  }
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal());

  EmitBranch(true_block, false_block, final_branch_condition);
}


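// EmitTypeofIs compiles the test for a single typeof result string. It may
// jump directly to true_label or false_label for cases it can decide
// eagerly, and returns the condition on which the caller should branch to
// true_label after the last emitted comparison.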
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_symbol())) {
    __ JumpIfSmi(input, true_label);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
    final_branch_condition = above_equal;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ j(equal, true_label);
    __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
    __ j(below, false_label);
    __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
    __ j(above_equal, false_label);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else {
    final_branch_condition = never;
    __ jmp(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
  Register result = ToRegister(instr->result());
  NearLabel true_label;
  NearLabel false_label;
  NearLabel done;

  EmitIsConstructCall(result);
  __ j(equal, &true_label);

  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ jmp(&done);

  __ bind(&true_label);
  __ LoadRoot(result, Heap::kTrueValueRootIndex);

  __ bind(&done);
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp);
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  NearLabel check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker);
  __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  // No code for lazy bailout instruction. Used to capture environment after a
  // call for populating the safepoint data with deoptimization data.
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  // Push object.
  if (obj->IsRegister()) {
    __ push(ToRegister(obj));
  } else {
    __ push(ToOperand(obj));
  }
  // Push key.
  if (key->IsConstantOperand()) {
    EmitPushConstantOperand(key);
  } else if (key->IsRegister()) {
    __ push(ToRegister(key));
  } else {
    __ push(ToOperand(key));
  }
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  LEnvironment* env = instr->deoptimization_environment();
  RecordPosition(pointers->position());
  RegisterEnvironmentForDeoptimization(env);
  // Create safepoint generator that will also ensure enough space in the
  // reloc info for patching in deoptimization (since this is invoking a
  // builtin)
  SafepointGenerator safepoint_generator(this,
                                         pointers,
                                         env->deoptimization_index());
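  // The DELETE builtin takes the language mode as an extra argument because
  // in strict mode deleting a non-configurable property must throw a
  // TypeError instead of evaluating to false.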
  __ Push(Smi::FromInt(strict_mode_flag()));
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  // Perform stack overflow check.
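  // The current stack limit lives in the root array, so the common case is
  // a single compare-and-branch. The runtime can also force this check to
  // fail (by adjusting the limit) in order to route execution through the
  // stub and handle interrupts.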
  NearLabel done;
  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
  __ j(above_equal, &done);

  StackCheckStub stub;
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&done);
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64