// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// TODO(turbofan): Cleanup these hacks.
enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };


struct Immediate64 {
  uint64_t value;
  Handle<Object> handle;
  ExternalReference reference;
  Immediate64Type type;
};


enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };


struct RegisterOrOperand {
  RegisterOrOperand() : operand(no_reg, 0) {}
  Register reg;
  DoubleRegister double_reg;
  Operand operand;
  RegisterOrOperandType type;
};

// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  RegisterOrOperand InputRegisterOrOperand(int index) {
    return ToRegisterOrOperand(instr_->InputAt(index));
  }

  Immediate InputImmediate(int index) {
    return ToImmediate(instr_->InputAt(index));
  }

  RegisterOrOperand OutputRegisterOrOperand() {
    return ToRegisterOrOperand(instr_->Output());
  }

  Immediate64 InputImmediate64(int index) {
    return ToImmediate64(instr_->InputAt(index));
  }

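  // Decodes a constant operand into one of the 64-bit immediate forms used
  // below: a raw integer value, a heap object handle, or an external
  // reference.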
  Immediate64 ToImmediate64(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    Immediate64 immediate;
    immediate.value = 0xbeefdeaddeefbeed;
    immediate.type = kImm64Value;
    switch (constant.type()) {
      case Constant::kInt32:
      case Constant::kInt64:
        immediate.value = constant.ToInt64();
        return immediate;
      case Constant::kFloat64:
        immediate.type = kImm64Handle;
        immediate.handle =
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
        return immediate;
      case Constant::kExternalReference:
        immediate.type = kImm64Reference;
        immediate.reference = constant.ToExternalReference();
        return immediate;
      case Constant::kHeapObject:
        immediate.type = kImm64Handle;
        immediate.handle = constant.ToHeapObject();
        return immediate;
    }
    UNREACHABLE();
    return immediate;
  }

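  // Only 32-bit constants can be encoded directly as instruction immediates
  // on x64; wider constants go through ToImmediate64 and a register.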
  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Immediate(constant.ToInt32());
      case Constant::kInt64:
      case Constant::kFloat64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        break;
    }
    UNREACHABLE();
    return Immediate(-1);
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    RegisterOrOperand result = ToRegisterOrOperand(op, extra);
    DCHECK_EQ(kOperand, result.type);
    return result.operand;
  }

  RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) {
    RegisterOrOperand result;
    if (op->IsRegister()) {
      DCHECK(extra == 0);
      result.type = kRegister;
      result.reg = ToRegister(op);
      return result;
    } else if (op->IsDoubleRegister()) {
      DCHECK(extra == 0);
      result.type = kDoubleRegister;
      result.double_reg = ToDoubleRegister(op);
      return result;
    }

    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());

    result.type = kOperand;
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
    result.operand =
        Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
    return result;
  }

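  // Decodes the instruction's addressing mode into an x64 Operand and
  // advances *first_input past the inputs consumed by the address.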
  Operand MemoryOperand(int* first_input) {
    const int offset = *first_input;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_MR1I: {
        *first_input += 2;
        Register index = InputRegister(offset + 1);
        return Operand(InputRegister(offset + 0), index, times_1,
                       0);  // TODO(dcarney): K != 0
      }
      case kMode_MRI:
        *first_input += 2;
        return Operand(InputRegister(offset + 0), InputInt32(offset + 1));
      default:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
  }

  Operand MemoryOperand() {
    int first_input = 0;
    return MemoryOperand(&first_input);
  }
};


static bool HasImmediateInput(Instruction* instr, int index) {
  return instr->InputAt(index)->IsImmediate();
}


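// Emits a binary ALU operation: the immediate form when input 1 is a
// constant, otherwise the register or memory form depending on where the
// register allocator placed the operand.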
#define ASSEMBLE_BINOP(asm_instr)                            \
  do {                                                       \
    if (HasImmediateInput(instr, 1)) {                       \
      RegisterOrOperand input = i.InputRegisterOrOperand(0); \
      if (input.type == kRegister) {                         \
        __ asm_instr(input.reg, i.InputImmediate(1));        \
      } else {                                               \
        __ asm_instr(input.operand, i.InputImmediate(1));    \
      }                                                      \
    } else {                                                 \
      RegisterOrOperand input = i.InputRegisterOrOperand(1); \
      if (input.type == kRegister) {                         \
        __ asm_instr(i.InputRegister(0), input.reg);         \
      } else {                                               \
        __ asm_instr(i.InputRegister(0), input.operand);     \
      }                                                      \
    }                                                        \
  } while (0)


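// Emits a shift or rotate. Immediate counts are read via InputInt##width;
// variable counts are expected in cl, hence the asm_instr##_cl form.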
#define ASSEMBLE_SHIFT(asm_instr, width)                                 \
  do {                                                                   \
    if (HasImmediateInput(instr, 1)) {                                   \
      __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
    } else {                                                             \
      __ asm_instr##_cl(i.OutputRegister());                             \
    }                                                                    \
  } while (0)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ Call(Operand(reg, entry));
      }
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      AddSafepointAndDeopt(instr);
      break;
    }
    case kArchJmp:
      __ jmp(code_->GetLabel(i.InputBlock(0)));
      break;
    case kArchNop:
      // don't emit code for nops.
      break;
    case kArchRet:
      AssembleReturn();
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp32:
      ASSEMBLE_BINOP(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_BINOP(cmpq);
      break;
    case kX64Test32:
      ASSEMBLE_BINOP(testl);
      break;
    case kX64Test:
      ASSEMBLE_BINOP(testq);
      break;
    case kX64Imul32:
      if (HasImmediateInput(instr, 1)) {
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
        } else {
          __ movq(kScratchRegister, input.operand);
          __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
        }
      } else {
        RegisterOrOperand input = i.InputRegisterOrOperand(1);
        if (input.type == kRegister) {
          __ imull(i.OutputRegister(), input.reg);
        } else {
          __ imull(i.OutputRegister(), input.operand);
        }
      }
      break;
    case kX64Imul:
      if (HasImmediateInput(instr, 1)) {
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
        } else {
          __ movq(kScratchRegister, input.operand);
          __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
        }
      } else {
        RegisterOrOperand input = i.InputRegisterOrOperand(1);
        if (input.type == kRegister) {
          __ imulq(i.OutputRegister(), input.reg);
        } else {
          __ imulq(i.OutputRegister(), input.operand);
        }
      }
      break;
    case kX64Idiv32:
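      // idivl divides edx:eax, so the register allocator is expected to have
      // fixed the dividend in rax; quotient and remainder land in rax/rdx.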
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Not: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ notq(output.reg);
      } else {
        __ notq(output.operand);
      }
      break;
    }
    case kX64Not32: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ notl(output.reg);
      } else {
        __ notl(output.operand);
      }
      break;
    }
    case kX64Neg: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ negq(output.reg);
      } else {
        __ negq(output.operand);
      }
      break;
    }
    case kX64Neg32: {
      RegisterOrOperand output = i.OutputRegisterOrOperand();
      if (output.type == kRegister) {
        __ negl(output.reg);
      } else {
        __ negl(output.operand);
      }
      break;
    }
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kSSEFloat64Cmp: {
      RegisterOrOperand input = i.InputRegisterOrOperand(1);
      if (input.type == kDoubleRegister) {
        __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
      } else {
        __ ucomisd(i.InputDoubleRegister(0), input.operand);
      }
      break;
    }
    case kSSEFloat64Add:
      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Sub:
      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mul:
      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Div:
      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following 2 instructions implicitly use rax.
      __ fnstsw_ax();
      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Sqrt: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kDoubleRegister) {
        __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
      } else {
        __ sqrtsd(i.OutputDoubleRegister(), input.operand);
      }
      break;
    }
    case kSSEFloat64ToInt32: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kDoubleRegister) {
        __ cvttsd2si(i.OutputRegister(), input.double_reg);
      } else {
        __ cvttsd2si(i.OutputRegister(), input.operand);
      }
      break;
    }
    case kSSEFloat64ToUint32: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kDoubleRegister) {
        __ cvttsd2siq(i.OutputRegister(), input.double_reg);
      } else {
        __ cvttsd2siq(i.OutputRegister(), input.operand);
      }
      __ andl(i.OutputRegister(), i.OutputRegister());  // clear upper bits.
      // TODO(turbofan): generated code should not look at the upper 32 bits
      // of the result, but those bits could escape to the outside world.
      break;
    }
    case kSSEInt32ToFloat64: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kRegister) {
        __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
      } else {
        __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
      }
      break;
    }
    case kSSEUint32ToFloat64: {
      // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
      __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kX64Movsxbl:
      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxbl:
      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movb: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movb(operand, Immediate(i.InputInt8(index)));
      } else {
        __ movb(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movsxwl:
      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movzxwl:
      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
      break;
    case kX64Movw: {
      int index = 0;
      Operand operand = i.MemoryOperand(&index);
      if (HasImmediateInput(instr, index)) {
        __ movw(operand, Immediate(i.InputInt16(index)));
      } else {
        __ movw(operand, i.InputRegister(index));
      }
      break;
    }
    case kX64Movl:
      if (instr->HasOutput()) {
        if (instr->addressing_mode() == kMode_None) {
          RegisterOrOperand input = i.InputRegisterOrOperand(0);
          if (input.type == kRegister) {
            __ movl(i.OutputRegister(), input.reg);
          } else {
            __ movl(i.OutputRegister(), input.operand);
          }
        } else {
          __ movl(i.OutputRegister(), i.MemoryOperand());
        }
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movl(operand, i.InputImmediate(index));
        } else {
          __ movl(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movsxlq: {
      RegisterOrOperand input = i.InputRegisterOrOperand(0);
      if (input.type == kRegister) {
        __ movsxlq(i.OutputRegister(), input.reg);
      } else {
        __ movsxlq(i.OutputRegister(), input.operand);
      }
      break;
    }
    case kX64Movq:
      if (instr->HasOutput()) {
        __ movq(i.OutputRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        if (HasImmediateInput(instr, index)) {
          __ movq(operand, i.InputImmediate(index));
        } else {
          __ movq(operand, i.InputRegister(index));
        }
      }
      break;
    case kX64Movss:
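      // Single-precision values are widened to double on load and narrowed
      // back on store (via xmm0).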
      if (instr->HasOutput()) {
        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
        __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
        __ movss(operand, xmm0);
      }
      break;
    case kX64Movsd:
      if (instr->HasOutput()) {
        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
      } else {
        int index = 0;
        Operand operand = i.MemoryOperand(&index);
        __ movsd(operand, i.InputDoubleRegister(index));
      }
      break;
    case kX64Push:
      if (HasImmediateInput(instr, 0)) {
        __ pushq(i.InputImmediate(0));
      } else {
        RegisterOrOperand input = i.InputRegisterOrOperand(0);
        if (input.type == kRegister) {
          __ pushq(input.reg);
        } else {
          __ pushq(input.operand);
        }
      }
      break;
    case kX64StoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ movsxlq(index, index);
      __ movq(Operand(object, index, times_1, 0), value);
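      // Compute the address of the slot that was just written so the write
      // barrier can record it.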
      __ leaq(index, Operand(object, index, times_1, 0));
      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
                                ? kSaveFPRegs
                                : kDontSaveFPRegs;
      __ RecordWrite(object, index, value, mode);
      break;
    }
  }
}


// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kEqual:
      __ j(equal, tlabel);
      break;
    case kUnorderedNotEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kNotEqual:
      __ j(not_equal, tlabel);
      break;
    case kSignedLessThan:
      __ j(less, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ j(greater_equal, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ j(less_equal, tlabel);
      break;
    case kSignedGreaterThan:
      __ j(greater, tlabel);
      break;
    case kUnorderedLessThan:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThan:
      __ j(below, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ j(above_equal, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_even, flabel, flabel_distance);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ j(below_equal, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ j(parity_even, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ j(above, tlabel);
      break;
    case kOverflow:
      __ j(overflow, tlabel);
      break;
    case kNotOverflow:
      __ j(no_overflow, tlabel);
      break;
  }
  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
  __ bind(&done);
}


// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  X64OperandConverter i(this, instr);
  Label done;

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
  Condition cc = no_condition;
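  // For the unordered (NaN-aware) conditions below, the parity flag from the
  // preceding floating-point compare is checked first so the result can be
  // forced to 0 or 1 directly.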
  switch (condition) {
    case kUnorderedEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kEqual:
      cc = equal;
      break;
    case kUnorderedNotEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kNotEqual:
      cc = not_equal;
      break;
    case kSignedLessThan:
      cc = less;
      break;
    case kSignedGreaterThanOrEqual:
      cc = greater_equal;
      break;
    case kSignedLessThanOrEqual:
      cc = less_equal;
      break;
    case kSignedGreaterThan:
      cc = greater;
      break;
    case kUnorderedLessThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThan:
      cc = below;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = above_equal;
      break;
    case kUnorderedLessThanOrEqual:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = below_equal;
      break;
    case kUnorderedGreaterThan:
      __ j(parity_odd, &check, Label::kNear);
      __ movl(reg, Immediate(1));
      __ jmp(&done, Label::kNear);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = above;
      break;
    case kOverflow:
      cc = overflow;
      break;
    case kNotOverflow:
      cc = no_overflow;
      break;
  }
  __ bind(&check);
  __ setcc(cc, reg);
  __ movzxbl(reg, reg);
  __ bind(&done);
}


void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ pushq(rbp);
    __ movq(rbp, rsp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        __ pushq(Register::from_code(i));
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
      __ movp(rcx, args.GetReceiverOperand());
      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &ok, Label::kNear);
      __ movp(rcx, GlobalObjectOperand());
      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
      __ movp(args.GetReceiverOperand(), rcx);
      __ bind(&ok);
    }

  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  if (stack_slots > 0) {
    __ subq(rsp, Immediate(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ addq(rsp, Immediate(stack_slots * kPointerSize));
      }
      const RegList saves = descriptor->CalleeSavedRegisters();
      // Restore registers.
      if (saves != 0) {
        for (int i = 0; i < Register::kNumRegisters; i++) {
          if (!((1 << i) & saves)) continue;
          __ popq(Register::from_code(i));
        }
      }
      __ popq(rbp);  // Pop caller's frame pointer.
      __ ret(0);
    } else {
      // No saved registers.
      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
      __ popq(rbp);       // Pop caller's frame pointer.
      __ ret(0);
    }
  } else {
    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
    __ popq(rbp);       // Pop caller's frame pointer.
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ ret(pop_count * kPointerSize);
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ movq(g.ToRegister(destination), src);
    } else {
      __ movq(g.ToOperand(destination), src);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ movq(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = kScratchRegister;
      Operand dst = g.ToOperand(destination);
      __ movq(tmp, src);
      __ movq(dst, tmp);
    }
  } else if (source->IsConstant()) {
    ConstantOperand* constant_source = ConstantOperand::cast(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : kScratchRegister;
      Immediate64 imm = g.ToImmediate64(constant_source);
      switch (imm.type) {
        case kImm64Value:
          __ Set(dst, imm.value);
          break;
        case kImm64Reference:
          __ Move(dst, imm.reference);
          break;
        case kImm64Handle:
          __ Move(dst, imm.handle);
          break;
      }
      if (destination->IsStackSlot()) {
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    } else {
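      // Materialize the double constant from its raw bit pattern, using the
      // scratch register as an intermediate.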
      __ movq(kScratchRegister,
              bit_cast<uint64_t, double>(g.ToDouble(constant_source)));
      if (destination->IsDoubleRegister()) {
        __ movq(g.ToDoubleRegister(destination), kScratchRegister);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        __ movq(g.ToOperand(destination), kScratchRegister);
      }
    }
  } else if (source->IsDoubleRegister()) {
    XMMRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = g.ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    Operand src = g.ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = g.ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = g.ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  X64OperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
  } else if (source->IsRegister() && destination->IsStackSlot()) {
    Register src = g.ToRegister(source);
    Operand dst = g.ToOperand(destination);
    __ xchgq(src, dst);
  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Memory-memory.
    Register tmp = kScratchRegister;
    Operand src = g.ToOperand(source);
    Operand dst = g.ToOperand(destination);
    __ movq(tmp, dst);
    __ xchgq(tmp, src);
    __ movq(dst, tmp);
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    XMMRegister dst = g.ToDoubleRegister(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
    // XMM register-memory swap.  We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = g.ToDoubleRegister(source);
    Operand dst = g.ToOperand(destination);
    __ movsd(xmm0, src);
    __ movsd(src, dst);
    __ movsd(dst, xmm0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!linkage()->info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8