code-generator-x64.cc revision 3b9bc31999c9787eb726ecdbfd5796bfdec32a18
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchDoubleReg xmm0


// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
 public:
  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  Immediate InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(size_t index, int extra = 0) {
    return ToOperand(instr_->InputAt(index), extra);
  }

  Operand OutputOperand() { return ToOperand(instr_->Output()); }

  Immediate ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    if (constant.type() == Constant::kFloat64) {
      DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
      return Immediate(0);
    }
    return Immediate(constant.ToInt32());
  }

  Operand ToOperand(InstructionOperand* op, int extra = 0) {
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
  }

  Operand SlotToOperand(int slot_index, int extra = 0) {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot_index);
    return Operand(offset.from_stack_pointer() ? rsp : rbp,
                   offset.offset() + extra);
  }

  static size_t NextOffset(size_t* offset) {
    size_t i = *offset;
    (*offset)++;
    return i;
  }

  static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
    STATIC_ASSERT(0 == static_cast<int>(times_1));
    STATIC_ASSERT(1 == static_cast<int>(times_2));
    STATIC_ASSERT(2 == static_cast<int>(times_4));
    STATIC_ASSERT(3 == static_cast<int>(times_8));
    int scale = static_cast<int>(mode - one);
    DCHECK(scale >= 0 && scale < 4);
    return static_cast<ScaleFactor>(scale);
  }

  Operand MemoryOperand(size_t* offset) {
    AddressingMode mode = AddressingModeField::decode(instr_->opcode());
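    // Mode mnemonics, as used in the cases below: "M" is a memory access,
    // "R" adds a register base, the digit is the index scale (1/2/4/8), and
    // a trailing "I" adds an immediate displacement. For example, kMode_MR4I
    // decodes to Operand(base, index, times_4, disp).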
    switch (mode) {
      case kMode_MR: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_MRI: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, disp);
      }
      case kMode_MR1:
      case kMode_MR2:
      case kMode_MR4:
      case kMode_MR8: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1, mode);
        int32_t disp = 0;
        return Operand(base, index, scale, disp);
      }
      case kMode_MR1I:
      case kMode_MR2I:
      case kMode_MR4I:
      case kMode_MR8I: {
        Register base = InputRegister(NextOffset(offset));
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(base, index, scale, disp);
      }
      case kMode_M1: {
        Register base = InputRegister(NextOffset(offset));
        int32_t disp = 0;
        return Operand(base, disp);
      }
      case kMode_M2:
        UNREACHABLE();  // Should use kMode_MR with a more compact encoding.
        return Operand(no_reg, 0);
      case kMode_M4:
      case kMode_M8: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1, mode);
        int32_t disp = 0;
        return Operand(index, scale, disp);
      }
      case kMode_M1I:
      case kMode_M2I:
      case kMode_M4I:
      case kMode_M8I: {
        Register index = InputRegister(NextOffset(offset));
        ScaleFactor scale = ScaleFor(kMode_M1I, mode);
        int32_t disp = InputInt32(NextOffset(offset));
        return Operand(index, scale, disp);
      }
      case kMode_None:
        UNREACHABLE();
        return Operand(no_reg, 0);
    }
    UNREACHABLE();
    return Operand(no_reg, 0);
  }

  Operand MemoryOperand(size_t first_input = 0) {
    return MemoryOperand(&first_input);
  }
};


namespace {

bool HasImmediateInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsImmediate();
}


class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ xorl(result_, result_); }

 private:
  Register const result_;
};


class OutOfLineLoadNaN final : public OutOfLineCode {
 public:
  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
      : OutOfLineCode(gen), result_(result) {}

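  // Comparing a register with itself via Pcmpeqd sets all 128 bits, and an
  // all-ones float64 bit pattern is a NaN.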
  void Generate() final { __ Pcmpeqd(result_, result_); }

 private:
  XMMRegister const result_;
};


class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
 public:
  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
                             XMMRegister input)
      : OutOfLineCode(gen), result_(result), input_(input) {}

  void Generate() final {
    __ subp(rsp, Immediate(kDoubleSize));
    __ Movsd(MemOperand(rsp, 0), input_);
    __ SlowTruncateToI(result_, rsp, 0);
    __ addp(rsp, Immediate(kDoubleSize));
  }

 private:
  Register const result_;
  XMMRegister const input_;
};


class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        operand_(operand),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    __ leap(scratch1_, operand_);
    __ CallStub(&stub);
  }

 private:
  Register const object_;
  Operand const operand_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
};

}  // namespace


#define ASSEMBLE_UNOP(asm_instr)         \
  do {                                   \
    if (instr->Output()->IsRegister()) { \
      __ asm_instr(i.OutputRegister());  \
    } else {                             \
      __ asm_instr(i.OutputOperand());   \
    }                                    \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr)                              \
  do {                                                         \
    if (HasImmediateInput(instr, 1)) {                         \
      if (instr->InputAt(0)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                 \
        __ asm_instr(i.InputOperand(0), i.InputImmediate(1));  \
      }                                                        \
    } else {                                                   \
      if (instr->InputAt(1)->IsRegister()) {                   \
        __ asm_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                 \
        __ asm_instr(i.InputRegister(0), i.InputOperand(1));   \
      }                                                        \
    }                                                          \
  } while (0)

#define ASSEMBLE_COMPARE(asm_instr)                                   \
  do {                                                                \
    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
      size_t index = 0;                                               \
      Operand left = i.MemoryOperand(&index);                         \
      if (HasImmediateInput(instr, index)) {                          \
        __ asm_instr(left, i.InputImmediate(index));                  \
      } else {                                                        \
        __ asm_instr(left, i.InputRegister(index));                   \
      }                                                               \
    } else {                                                          \
      if (HasImmediateInput(instr, 1)) {                              \
        if (instr->InputAt(0)->IsRegister()) {                        \
          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
        } else {                                                      \
          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
        }                                                             \
      } else {                                                        \
        if (instr->InputAt(1)->IsRegister()) {                        \
          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
        } else {                                                      \
          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
        }                                                             \
      }                                                               \
    }                                                                 \
  } while (0)

#define ASSEMBLE_MULT(asm_instr)                              \
  do {                                                        \
    if (HasImmediateInput(instr, 1)) {                        \
      if (instr->InputAt(0)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(0),  \
                     i.InputImmediate(1));                    \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(0),   \
                     i.InputImmediate(1));                    \
      }                                                       \
    } else {                                                  \
      if (instr->InputAt(1)->IsRegister()) {                  \
        __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
      } else {                                                \
        __ asm_instr(i.OutputRegister(), i.InputOperand(1));  \
      }                                                       \
    }                                                         \
  } while (0)


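// `width` selects InputInt5 or InputInt6, which masks the immediate shift
// count to the operand width, matching what the hardware does for variable
// counts; non-immediate shift counts use cl.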
#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
  do {                                                                     \
    if (HasImmediateInput(instr, 1)) {                                     \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
      } else {                                                             \
        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
      }                                                                    \
    } else {                                                               \
      if (instr->Output()->IsRegister()) {                                 \
        __ asm_instr##_cl(i.OutputRegister());                             \
      } else {                                                             \
        __ asm_instr##_cl(i.OutputOperand());                              \
      }                                                                    \
    }                                                                      \
  } while (0)


#define ASSEMBLE_MOVX(asm_instr)                            \
  do {                                                      \
    if (instr->addressing_mode() != kMode_None) {           \
      __ asm_instr(i.OutputRegister(), i.MemoryOperand());  \
    } else if (instr->InputAt(0)->IsRegister()) {           \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0)); \
    } else {                                                \
      __ asm_instr(i.OutputRegister(), i.InputOperand(0));  \
    }                                                       \
  } while (0)


#define ASSEMBLE_SSE_BINOP(asm_instr)                                   \
  do {                                                                  \
    if (instr->InputAt(1)->IsDoubleRegister()) {                        \
      __ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                            \
      __ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_SSE_UNOP(asm_instr)                                    \
  do {                                                                  \
    if (instr->InputAt(0)->IsDoubleRegister()) {                        \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
    } else {                                                            \
      __ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0));        \
    }                                                                   \
  } while (0)


#define ASSEMBLE_AVX_BINOP(asm_instr)                                  \
  do {                                                                 \
    CpuFeatureScope avx_scope(masm(), AVX);                            \
    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputDoubleRegister(1));                          \
    } else {                                                           \
      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                   i.InputOperand(1));                                 \
    }                                                                  \
  } while (0)


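// Checked loads guard typed-array accesses. The in-line fast path compares
// index1 against the length (or length - index2 when the length is a
// constant) and falls through to the load; the out-of-line path re-checks
// index1 + index2 against the length and materializes NaN (for floats) or
// zero (for integers) when the access is out of bounds.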
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
  do {                                                                       \
    auto result = i.OutputDoubleRegister();                                  \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    OutOfLineCode* ool;                                                      \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      __ cmpl(index1, length);                                               \
      ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineLoadFloat final : public OutOfLineCode {                \
       public:                                                               \
        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
                           Register buffer, Register index1, int32_t index2, \
                           int32_t length)                                   \
            : OutOfLineCode(gen),                                            \
              result_(result),                                               \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length) {}                                             \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ Pcmpeqd(result_, result_);                                      \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(result_,                                              \
                       Operand(buffer_, kScratchRegister, times_1, 0));      \
        }                                                                    \
                                                                             \
       private:                                                              \
        XMMRegister const result_;                                           \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
      };                                                                     \
      ool = new (zone())                                                     \
          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
    }                                                                        \
    __ j(above_equal, ool->entry());                                         \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
    __ bind(ool->exit());                                                    \
  } while (false)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
  do {                                                                         \
    auto result = i.OutputRegister();                                          \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    OutOfLineCode* ool;                                                        \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      __ cmpl(index1, length);                                                 \
      ool = new (zone()) OutOfLineLoadZero(this, result);                      \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineLoadInteger final : public OutOfLineCode {                \
       public:                                                                 \
        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
                             Register buffer, Register index1, int32_t index2, \
                             int32_t length)                                   \
            : OutOfLineCode(gen),                                              \
              result_(result),                                                 \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length) {}                                               \
                                                                               \
        void Generate() final {                                                \
          Label oob;                                                           \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, &oob, Label::kNear);                               \
          __ asm_instr(result_,                                                \
                       Operand(buffer_, kScratchRegister, times_1, 0));        \
          __ jmp(exit());                                                      \
          __ bind(&oob);                                                       \
          __ xorl(result_, result_);                                           \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const result_;                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
      };                                                                       \
      ool = new (zone())                                                       \
          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
    }                                                                          \
    __ j(above_equal, ool->entry());                                           \
    __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
    __ bind(ool->exit());                                                      \
  } while (false)


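// Checked stores use the same bounds check as the checked loads above but
// simply skip the write when the index is out of range.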
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
  do {                                                                       \
    auto buffer = i.InputRegister(0);                                        \
    auto index1 = i.InputRegister(1);                                        \
    auto index2 = i.InputInt32(2);                                           \
    auto value = i.InputDoubleRegister(4);                                   \
    if (instr->InputAt(3)->IsRegister()) {                                   \
      auto length = i.InputRegister(3);                                      \
      DCHECK_EQ(0, index2);                                                  \
      Label done;                                                            \
      __ cmpl(index1, length);                                               \
      __ j(above_equal, &done, Label::kNear);                                \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(&done);                                                        \
    } else {                                                                 \
      auto length = i.InputInt32(3);                                         \
      DCHECK_LE(index2, length);                                             \
      __ cmpq(index1, Immediate(length - index2));                           \
      class OutOfLineStoreFloat final : public OutOfLineCode {               \
       public:                                                               \
        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
                            Register index1, int32_t index2, int32_t length, \
                            XMMRegister value)                               \
            : OutOfLineCode(gen),                                            \
              buffer_(buffer),                                               \
              index1_(index1),                                               \
              index2_(index2),                                               \
              length_(length),                                               \
              value_(value) {}                                               \
                                                                             \
        void Generate() final {                                              \
          __ leal(kScratchRegister, Operand(index1_, index2_));              \
          __ cmpl(kScratchRegister, Immediate(length_));                     \
          __ j(above_equal, exit());                                         \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
                       value_);                                              \
        }                                                                    \
                                                                             \
       private:                                                              \
        Register const buffer_;                                              \
        Register const index1_;                                              \
        int32_t const index2_;                                               \
        int32_t const length_;                                               \
        XMMRegister const value_;                                            \
      };                                                                     \
      auto ool = new (zone())                                                \
          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                       \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
      __ bind(ool->exit());                                                  \
    }                                                                        \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
  do {                                                                         \
    auto buffer = i.InputRegister(0);                                          \
    auto index1 = i.InputRegister(1);                                          \
    auto index2 = i.InputInt32(2);                                             \
    if (instr->InputAt(3)->IsRegister()) {                                     \
      auto length = i.InputRegister(3);                                        \
      DCHECK_EQ(0, index2);                                                    \
      Label done;                                                              \
      __ cmpl(index1, length);                                                 \
      __ j(above_equal, &done, Label::kNear);                                  \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(&done);                                                          \
    } else {                                                                   \
      auto length = i.InputInt32(3);                                           \
      DCHECK_LE(index2, length);                                               \
      __ cmpq(index1, Immediate(length - index2));                             \
      class OutOfLineStoreInteger final : public OutOfLineCode {               \
       public:                                                                 \
        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
                              Register index1, int32_t index2, int32_t length, \
                              Value value)                                     \
            : OutOfLineCode(gen),                                              \
              buffer_(buffer),                                                 \
              index1_(index1),                                                 \
              index2_(index2),                                                 \
              length_(length),                                                 \
              value_(value) {}                                                 \
                                                                               \
        void Generate() final {                                                \
          __ leal(kScratchRegister, Operand(index1_, index2_));                \
          __ cmpl(kScratchRegister, Immediate(length_));                       \
          __ j(above_equal, exit());                                           \
          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
                       value_);                                                \
        }                                                                      \
                                                                               \
       private:                                                                \
        Register const buffer_;                                                \
        Register const index1_;                                                \
        int32_t const index2_;                                                 \
        int32_t const length_;                                                 \
        Value const value_;                                                    \
      };                                                                       \
      auto ool = new (zone())                                                  \
          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
      __ j(above_equal, ool->entry());                                         \
      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
      __ bind(ool->exit());                                                    \
    }                                                                          \
  } while (false)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
  do {                                                           \
    if (instr->InputAt(4)->IsRegister()) {                       \
      Register value = i.InputRegister(4);                       \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
    } else {                                                     \
      Immediate value = i.InputImmediate(4);                     \
      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
    }                                                            \
  } while (false)

void CodeGenerator::AssembleDeconstructFrame() {
  __ movq(rsp, rbp);
  __ popq(rbp);
}

void CodeGenerator::AssembleSetupStackPointer() {}

void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ addq(rsp, Immediate(sp_slot_delta * kPointerSize));
  }
  frame_access_state()->SetFrameAccessToDefault();
}


void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ subq(rsp, Immediate(-sp_slot_delta * kPointerSize));
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame_access_state()->has_frame()) {
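    // Discard the current frame by reloading the caller's frame pointer,
    // which was saved at [rbp] when the frame was constructed.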
    __ movq(rbp, MemOperand(rbp, 0));
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ Cmp(Operand(rbp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &done, Label::kNear);

  // Load the arguments count from the current arguments adaptor frame (note
  // that it does not include the receiver).
  Register caller_args_count_reg = scratch1;
  __ SmiToInteger32(
      caller_args_count_reg,
      Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3, ReturnAddressState::kOnStack);
  __ bind(&done);
}

// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  X64OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ call(reg);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      if (HasImmediateInput(instr, 0)) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ jmp(code, RelocInfo::CODE_TARGET);
      } else {
        Register reg = i.InputRegister(0);
        __ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
        __ jmp(reg);
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
      frame_access_state()->ClearSPDelta();
      RecordCallPosition(instr);
      break;
    }
    case kArchTailCallJSFunctionFromJSFunction:
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
        __ Assert(equal, kWrongFunctionContext);
      }
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction: {
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters);
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (HasImmediateInput(instr, 0)) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // Don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ movq(i.OutputRegister(), rsp);
      break;
    case kArchFramePointer:
      __ movq(i.OutputRegister(), rbp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ movq(i.OutputRegister(), Operand(rbp, 0));
      } else {
        __ movq(i.OutputRegister(), rbp);
      }
      break;
    case kArchTruncateDoubleToI: {
      auto result = i.OutputRegister();
      auto input = i.InputDoubleRegister(0);
      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
      __ Cvttsd2siq(result, input);
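      // Cvttsd2siq produces INT64_MIN (0x8000000000000000) when the input is
      // out of range. Comparing against 1 sets the overflow flag for exactly
      // that value (INT64_MIN - 1 overflows), routing us to the slow path.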
      __ cmpq(result, Immediate(1));
      __ j(overflow, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      size_t index = 0;
      Operand operand = i.MemoryOperand(&index);
      Register value = i.InputRegister(index);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
                                                   scratch0, scratch1, mode);
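      // Perform the store unconditionally; the write barrier runs out of line
      // and only when the object's page is flagged as interesting.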
      __ movp(operand, value);
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask,
                       not_zero, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base;
      if (offset.from_stack_pointer()) {
        base = rsp;
      } else {
        base = rbp;
      }
      __ leaq(i.OutputRegister(), Operand(base, offset.offset()));
      break;
    }
    case kX64Add32:
      ASSEMBLE_BINOP(addl);
      break;
    case kX64Add:
      ASSEMBLE_BINOP(addq);
      break;
    case kX64Sub32:
      ASSEMBLE_BINOP(subl);
      break;
    case kX64Sub:
      ASSEMBLE_BINOP(subq);
      break;
    case kX64And32:
      ASSEMBLE_BINOP(andl);
      break;
    case kX64And:
      ASSEMBLE_BINOP(andq);
      break;
    case kX64Cmp8:
      ASSEMBLE_COMPARE(cmpb);
      break;
    case kX64Cmp16:
      ASSEMBLE_COMPARE(cmpw);
      break;
    case kX64Cmp32:
      ASSEMBLE_COMPARE(cmpl);
      break;
    case kX64Cmp:
      ASSEMBLE_COMPARE(cmpq);
      break;
    case kX64Test8:
      ASSEMBLE_COMPARE(testb);
      break;
    case kX64Test16:
      ASSEMBLE_COMPARE(testw);
      break;
    case kX64Test32:
      ASSEMBLE_COMPARE(testl);
      break;
    case kX64Test:
      ASSEMBLE_COMPARE(testq);
      break;
    case kX64Imul32:
      ASSEMBLE_MULT(imull);
      break;
    case kX64Imul:
      ASSEMBLE_MULT(imulq);
      break;
    case kX64ImulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ imull(i.InputRegister(1));
      } else {
        __ imull(i.InputOperand(1));
      }
      break;
    case kX64UmulHigh32:
      if (instr->InputAt(1)->IsRegister()) {
        __ mull(i.InputRegister(1));
      } else {
        __ mull(i.InputOperand(1));
      }
      break;
    case kX64Idiv32:
      __ cdq();
      __ idivl(i.InputRegister(1));
      break;
    case kX64Idiv:
      __ cqo();
      __ idivq(i.InputRegister(1));
      break;
    case kX64Udiv32:
      __ xorl(rdx, rdx);
      __ divl(i.InputRegister(1));
      break;
    case kX64Udiv:
      __ xorq(rdx, rdx);
      __ divq(i.InputRegister(1));
      break;
    case kX64Not:
      ASSEMBLE_UNOP(notq);
      break;
    case kX64Not32:
      ASSEMBLE_UNOP(notl);
      break;
    case kX64Neg:
      ASSEMBLE_UNOP(negq);
      break;
    case kX64Neg32:
      ASSEMBLE_UNOP(negl);
      break;
    case kX64Or32:
      ASSEMBLE_BINOP(orl);
      break;
    case kX64Or:
      ASSEMBLE_BINOP(orq);
      break;
    case kX64Xor32:
      ASSEMBLE_BINOP(xorl);
      break;
    case kX64Xor:
      ASSEMBLE_BINOP(xorq);
      break;
    case kX64Shl32:
      ASSEMBLE_SHIFT(shll, 5);
      break;
    case kX64Shl:
      ASSEMBLE_SHIFT(shlq, 6);
      break;
    case kX64Shr32:
      ASSEMBLE_SHIFT(shrl, 5);
      break;
    case kX64Shr:
      ASSEMBLE_SHIFT(shrq, 6);
      break;
    case kX64Sar32:
      ASSEMBLE_SHIFT(sarl, 5);
      break;
    case kX64Sar:
      ASSEMBLE_SHIFT(sarq, 6);
      break;
    case kX64Ror32:
      ASSEMBLE_SHIFT(rorl, 5);
      break;
    case kX64Ror:
      ASSEMBLE_SHIFT(rorq, 6);
      break;
    case kX64Lzcnt:
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Lzcntq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Lzcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Tzcnt:
      if (instr->InputAt(0)->IsRegister()) {
        __ Tzcntq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Tzcntq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Tzcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Tzcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Tzcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Popcnt:
      if (instr->InputAt(0)->IsRegister()) {
        __ Popcntq(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Popcntq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kX64Popcnt32:
      if (instr->InputAt(0)->IsRegister()) {
        __ Popcntl(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ Popcntl(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat32Cmp:
      ASSEMBLE_SSE_BINOP(Ucomiss);
      break;
    case kSSEFloat32Add:
      ASSEMBLE_SSE_BINOP(addss);
      break;
    case kSSEFloat32Sub:
      ASSEMBLE_SSE_BINOP(subss);
      break;
    case kSSEFloat32Mul:
      ASSEMBLE_SSE_BINOP(mulss);
      break;
    case kSSEFloat32Div:
      ASSEMBLE_SSE_BINOP(divss);
      // Don't delete this mov. It may improve performance on some CPUs when
      // there is a (v)mulss depending on the result.
      __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat32Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
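      // All-ones shifted right by 33 leaves 0x7FFFFFFF in the low lane, so
      // the andps clears the sign bit of the float32 result.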
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 33);
      __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
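      // Shifting all ones left by 31 sets bit 31 of the low lane, so the
      // xorps flips the sign bit of the float32 result.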
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 31);
      __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat32Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtss);
      break;
    case kSSEFloat32Max:
      ASSEMBLE_SSE_BINOP(maxss);
      break;
    case kSSEFloat32Min:
      ASSEMBLE_SSE_BINOP(minss);
      break;
    case kSSEFloat32ToFloat64:
      ASSEMBLE_SSE_UNOP(Cvtss2sd);
      break;
    case kSSEFloat32Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat32ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttss2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttss2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat32ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    }
    case kSSEFloat64Cmp:
      ASSEMBLE_SSE_BINOP(Ucomisd);
      break;
    case kSSEFloat64Add:
      ASSEMBLE_SSE_BINOP(addsd);
      break;
    case kSSEFloat64Sub:
      ASSEMBLE_SSE_BINOP(subsd);
      break;
    case kSSEFloat64Mul:
      ASSEMBLE_SSE_BINOP(mulsd);
      break;
    case kSSEFloat64Div:
      ASSEMBLE_SSE_BINOP(divsd);
      // Don't delete this mov. It may improve performance on some CPUs when
      // there is a (v)mulsd depending on the result.
      __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
      break;
    case kSSEFloat64Mod: {
      __ subq(rsp, Immediate(kDoubleSize));
      // Move values to st(0) and st(1).
      __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
      __ fld_d(Operand(rsp, 0));
      __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
      __ fld_d(Operand(rsp, 0));
      // Loop while fprem isn't done.
      Label mod_loop;
      __ bind(&mod_loop);
      // This instruction traps on all kinds of inputs, but we are assuming
      // the floating point control word is set to ignore them all.
      __ fprem();
      // The following two instructions implicitly use rax.
      __ fnstsw_ax();
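      // fnstsw puts FPU flag C2 ("reduction incomplete") into bit 2 of ah;
      // sahf (or the pushq/popfq fallback) moves it into the parity flag, so
      // parity_even loops until fprem has fully reduced the operand.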
      if (CpuFeatures::IsSupported(SAHF)) {
        CpuFeatureScope sahf_scope(masm(), SAHF);
        __ sahf();
      } else {
        __ shrl(rax, Immediate(8));
        __ andl(rax, Immediate(0xFF));
        __ pushq(rax);
        __ popfq();
      }
      __ j(parity_even, &mod_loop);
      // Move output to stack and clean up.
      __ fstp(1);
      __ fstp_d(Operand(rsp, 0));
      __ Movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
      __ addq(rsp, Immediate(kDoubleSize));
      break;
    }
    case kSSEFloat64Max:
      ASSEMBLE_SSE_BINOP(maxsd);
      break;
    case kSSEFloat64Min:
      ASSEMBLE_SSE_BINOP(minsd);
      break;
    case kSSEFloat64Abs: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
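      // All-ones shifted right by 1 gives 0x7FFFFFFFFFFFFFFF, so the andpd
      // clears the sign bit of the float64 result.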
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psrlq(kScratchDoubleReg, 1);
      __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Neg: {
      // TODO(bmeurer): Use RIP relative 128-bit constants.
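      // Shifting all ones left by 63 leaves only the sign bit set, so the
      // xorpd flips the sign bit of the float64 result.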
      __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
      __ psllq(kScratchDoubleReg, 63);
      __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
      break;
    }
    case kSSEFloat64Sqrt:
      ASSEMBLE_SSE_UNOP(sqrtsd);
      break;
    case kSSEFloat64Round: {
      CpuFeatureScope sse_scope(masm(), SSE4_1);
      RoundingMode const mode =
          static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
      __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
      break;
    }
    case kSSEFloat64ToFloat32:
      ASSEMBLE_SSE_UNOP(Cvtsd2ss);
      break;
    case kSSEFloat64ToInt32:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
      }
      break;
    case kSSEFloat64ToUint32: {
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
      }
      if (MiscField::decode(instr->opcode())) {
        __ AssertZeroExtended(i.OutputRegister());
      }
      break;
    }
    case kSSEFloat32ToInt64:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
      }
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 1);
        Label done;
        Label fail;
        __ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
        if (instr->InputAt(0)->IsDoubleRegister()) {
          __ Ucomiss(kScratchDoubleReg, i.InputDoubleRegister(0));
        } else {
          __ Ucomiss(kScratchDoubleReg, i.InputOperand(0));
        }
        // If the input is NaN, then the conversion fails.
        __ j(parity_even, &fail);
        // If the input is INT64_MIN, then the conversion succeeds.
        __ j(equal, &done);
        __ cmpq(i.OutputRegister(0), Immediate(1));
        // If the conversion results in INT64_MIN, but the input was not
        // INT64_MIN, then the conversion fails.
        __ j(no_overflow, &done);
        __ bind(&fail);
        __ Set(i.OutputRegister(1), 0);
        __ bind(&done);
      }
      break;
    case kSSEFloat64ToInt64:
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttsd2siq(i.OutputRegister(0), i.InputDoubleRegister(0));
      } else {
        __ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
      }
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 1);
        Label done;
        Label fail;
        __ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
        if (instr->InputAt(0)->IsDoubleRegister()) {
          __ Ucomisd(kScratchDoubleReg, i.InputDoubleRegister(0));
        } else {
          __ Ucomisd(kScratchDoubleReg, i.InputOperand(0));
        }
        // If the input is NaN, then the conversion fails.
        __ j(parity_even, &fail);
        // If the input is INT64_MIN, then the conversion succeeds.
        __ j(equal, &done);
        __ cmpq(i.OutputRegister(0), Immediate(1));
        // If the conversion results in INT64_MIN, but the input was not
        // INT64_MIN, then the conversion fails.
        __ j(no_overflow, &done);
        __ bind(&fail);
        __ Set(i.OutputRegister(1), 0);
        __ bind(&done);
      }
      break;
    case kSSEFloat32ToUint64: {
      Label done;
      Label success;
      if (instr->OutputCount() > 1) {
        __ Set(i.OutputRegister(1), 0);
      }
      // There does not exist a Float32ToUint64 instruction, so we have to use
      // the Float32ToInt64 instruction.
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
      } else {
        __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
      }
      // If the result of the Float32ToInt64 conversion is positive, we are
      // already done.
      __ testq(i.OutputRegister(), i.OutputRegister());
      __ j(positive, &success);
1240      // The result of the first conversion was negative, which means that the
1241      // input value was not within the positive int64 range. We subtract 2^63
1242      // and convert it again to see if it is within the uint64 range.
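          // Worked example (illustrative): for the input 2^63 the first
          // conversion overflows to INT64_MIN, the addition below yields
          // 0.0f, the second conversion produces 0, and the final orq
          // restores the 2^63 bit, giving 0x8000000000000000 as expected.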
1243      __ Move(kScratchDoubleReg, -9223372036854775808.0f);
1244      if (instr->InputAt(0)->IsDoubleRegister()) {
1245        __ addss(kScratchDoubleReg, i.InputDoubleRegister(0));
1246      } else {
1247        __ addss(kScratchDoubleReg, i.InputOperand(0));
1248      }
1249      __ Cvttss2siq(i.OutputRegister(), kScratchDoubleReg);
1250      __ testq(i.OutputRegister(), i.OutputRegister());
1251      // The only possible negative value here is 0x8000000000000000, which is
1252      // used on x64 to indicate an integer overflow.
1253      __ j(negative, &done);
1254      // The input value is within the uint64 range and the second conversion
1255      // succeeded, but we still have to undo the subtraction of 2^63 we did
1256      // earlier.
1257      __ Set(kScratchRegister, 0x8000000000000000);
1258      __ orq(i.OutputRegister(), kScratchRegister);
1259      __ bind(&success);
1260      if (instr->OutputCount() > 1) {
1261        __ Set(i.OutputRegister(1), 1);
1262      }
1263      __ bind(&done);
1264      break;
1265    }
1266    case kSSEFloat64ToUint64: {
1267      Label done;
1268      Label success;
1269      if (instr->OutputCount() > 1) {
1270        __ Set(i.OutputRegister(1), 0);
1271      }
1272      // There is no Float64ToUint64 instruction, so we use the
1273      // Float64ToInt64 instruction and fix up the result where necessary.
1274      if (instr->InputAt(0)->IsDoubleRegister()) {
1275        __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
1276      } else {
1277        __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
1278      }
1279      // If the result of the Float64ToInt64 conversion is non-negative, we
1280      // are already done.
1281      __ testq(i.OutputRegister(), i.OutputRegister());
1282      __ j(positive, &success);
1283      // The result of the first conversion was negative, which means that the
1284      // input value was not within the positive int64 range. We subtract 2^63
1285      // and convert it again to see if it is within the uint64 range.
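          // The same fix-up as in the Float32 case above applies here, just
          // in double precision.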
1286      __ Move(kScratchDoubleReg, -9223372036854775808.0);
1287      if (instr->InputAt(0)->IsDoubleRegister()) {
1288        __ addsd(kScratchDoubleReg, i.InputDoubleRegister(0));
1289      } else {
1290        __ addsd(kScratchDoubleReg, i.InputOperand(0));
1291      }
1292      __ Cvttsd2siq(i.OutputRegister(), kScratchDoubleReg);
1293      __ testq(i.OutputRegister(), i.OutputRegister());
1294      // The only possible negative value here is 0x8000000000000000, which is
1295      // used on x64 to indicate an integer overflow.
1296      __ j(negative, &done);
1297      // The input value is within the uint64 range and the second conversion
1298      // succeeded, but we still have to undo the subtraction of 2^63 we did
1299      // earlier.
1300      __ Set(kScratchRegister, 0x8000000000000000);
1301      __ orq(i.OutputRegister(), kScratchRegister);
1302      __ bind(&success);
1303      if (instr->OutputCount() > 1) {
1304        __ Set(i.OutputRegister(1), 1);
1305      }
1306      __ bind(&done);
1307      break;
1308    }
1309    case kSSEInt32ToFloat64:
1310      if (instr->InputAt(0)->IsRegister()) {
1311        __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
1312      } else {
1313        __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
1314      }
1315      break;
1316    case kSSEInt32ToFloat32:
1317      if (instr->InputAt(0)->IsRegister()) {
1318        __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
1319      } else {
1320        __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
1321      }
1322      break;
1323    case kSSEInt64ToFloat32:
1324      if (instr->InputAt(0)->IsRegister()) {
1325        __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
1326      } else {
1327        __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
1328      }
1329      break;
1330    case kSSEInt64ToFloat64:
1331      if (instr->InputAt(0)->IsRegister()) {
1332        __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
1333      } else {
1334        __ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
1335      }
1336      break;
1337    case kSSEUint64ToFloat32:
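          // There is no SSE instruction that converts an unsigned 64-bit
          // value directly; the Cvtqui2ss macro-assembler helper is
          // expected to emulate it, using the temp register for inputs with
          // the top bit set. The Float64 case below works the same way.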
1338      if (instr->InputAt(0)->IsRegister()) {
1339        __ movq(kScratchRegister, i.InputRegister(0));
1340      } else {
1341        __ movq(kScratchRegister, i.InputOperand(0));
1342      }
1343      __ Cvtqui2ss(i.OutputDoubleRegister(), kScratchRegister,
1344                   i.TempRegister(0));
1345      break;
1346    case kSSEUint64ToFloat64:
1347      if (instr->InputAt(0)->IsRegister()) {
1348        __ movq(kScratchRegister, i.InputRegister(0));
1349      } else {
1350        __ movq(kScratchRegister, i.InputOperand(0));
1351      }
1352      __ Cvtqui2sd(i.OutputDoubleRegister(), kScratchRegister,
1353                   i.TempRegister(0));
1354      break;
1355    case kSSEUint32ToFloat64:
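          // movl zero-extends the 32-bit input to 64 bits, so the scratch
          // register is always non-negative and the plain signed 64-bit
          // conversion is correct even for inputs >= 2^31. The Float32 case
          // below uses the same trick.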
1356      if (instr->InputAt(0)->IsRegister()) {
1357        __ movl(kScratchRegister, i.InputRegister(0));
1358      } else {
1359        __ movl(kScratchRegister, i.InputOperand(0));
1360      }
1361      __ Cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
1362      break;
1363    case kSSEUint32ToFloat32:
1364      if (instr->InputAt(0)->IsRegister()) {
1365        __ movl(kScratchRegister, i.InputRegister(0));
1366      } else {
1367        __ movl(kScratchRegister, i.InputOperand(0));
1368      }
1369      __ Cvtqsi2ss(i.OutputDoubleRegister(), kScratchRegister);
1370      break;
1371    case kSSEFloat64ExtractLowWord32:
1372      if (instr->InputAt(0)->IsDoubleStackSlot()) {
1373        __ movl(i.OutputRegister(), i.InputOperand(0));
1374      } else {
1375        __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
1376      }
1377      break;
1378    case kSSEFloat64ExtractHighWord32:
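          // On little-endian x64 the high word of the double sits
          // kDoubleSize / 2 = 4 bytes into the stack slot; for a register
          // input, Pextrd lane 1 extracts it.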
1379      if (instr->InputAt(0)->IsDoubleStackSlot()) {
1380        __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
1381      } else {
1382        __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
1383      }
1384      break;
1385    case kSSEFloat64InsertLowWord32:
1386      if (instr->InputAt(1)->IsRegister()) {
1387        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
1388      } else {
1389        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
1390      }
1391      break;
1392    case kSSEFloat64InsertHighWord32:
1393      if (instr->InputAt(1)->IsRegister()) {
1394        __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
1395      } else {
1396        __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
1397      }
1398      break;
1399    case kSSEFloat64LoadLowWord32:
1400      if (instr->InputAt(0)->IsRegister()) {
1401        __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
1402      } else {
1403        __ Movd(i.OutputDoubleRegister(), i.InputOperand(0));
1404      }
1405      break;
1406    case kAVXFloat32Cmp: {
1407      CpuFeatureScope avx_scope(masm(), AVX);
1408      if (instr->InputAt(1)->IsDoubleRegister()) {
1409        __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
1410      } else {
1411        __ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
1412      }
1413      break;
1414    }
1415    case kAVXFloat32Add:
1416      ASSEMBLE_AVX_BINOP(vaddss);
1417      break;
1418    case kAVXFloat32Sub:
1419      ASSEMBLE_AVX_BINOP(vsubss);
1420      break;
1421    case kAVXFloat32Mul:
1422      ASSEMBLE_AVX_BINOP(vmulss);
1423      break;
1424    case kAVXFloat32Div:
1425      ASSEMBLE_AVX_BINOP(vdivss);
1426      // Don't delete this mov. It may improve performance on some CPUs
1427      // when there is a (v)mulss depending on the result.
1428      __ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
1429      break;
1430    case kAVXFloat32Max:
1431      ASSEMBLE_AVX_BINOP(vmaxss);
1432      break;
1433    case kAVXFloat32Min:
1434      ASSEMBLE_AVX_BINOP(vminss);
1435      break;
1436    case kAVXFloat64Cmp: {
1437      CpuFeatureScope avx_scope(masm(), AVX);
1438      if (instr->InputAt(1)->IsDoubleRegister()) {
1439        __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
1440      } else {
1441        __ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
1442      }
1443      break;
1444    }
1445    case kAVXFloat64Add:
1446      ASSEMBLE_AVX_BINOP(vaddsd);
1447      break;
1448    case kAVXFloat64Sub:
1449      ASSEMBLE_AVX_BINOP(vsubsd);
1450      break;
1451    case kAVXFloat64Mul:
1452      ASSEMBLE_AVX_BINOP(vmulsd);
1453      break;
1454    case kAVXFloat64Div:
1455      ASSEMBLE_AVX_BINOP(vdivsd);
1456      // Don't delete this mov. It may improve performance on some CPUs
1457      // when there is a (v)mulsd depending on the result.
1458      __ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
1459      break;
1460    case kAVXFloat64Max:
1461      ASSEMBLE_AVX_BINOP(vmaxsd);
1462      break;
1463    case kAVXFloat64Min:
1464      ASSEMBLE_AVX_BINOP(vminsd);
1465      break;
1466    case kAVXFloat32Abs: {
1467      // TODO(bmeurer): Use RIP relative 128-bit constants.
1468      CpuFeatureScope avx_scope(masm(), AVX);
1469      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
1470      __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
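          // Shifting all-ones right by 33 leaves 0x7FFFFFFF in the low word
          // of each 64-bit lane, a mask that clears the float's sign bit,
          // so the vandps below computes the absolute value.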
1471      if (instr->InputAt(0)->IsDoubleRegister()) {
1472        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
1473                  i.InputDoubleRegister(0));
1474      } else {
1475        __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
1476                  i.InputOperand(0));
1477      }
1478      break;
1479    }
1480    case kAVXFloat32Neg: {
1481      // TODO(bmeurer): Use RIP relative 128-bit constants.
1482      CpuFeatureScope avx_scope(masm(), AVX);
1483      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
1484      __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
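          // Shifting all-ones left by 31 sets bit 31, the float's sign bit,
          // so the vxorps below flips it to negate; the upper bits flipped
          // along with it do not affect the scalar result.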
1485      if (instr->InputAt(0)->IsDoubleRegister()) {
1486        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
1487                  i.InputDoubleRegister(0));
1488      } else {
1489        __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
1490                  i.InputOperand(0));
1491      }
1492      break;
1493    }
1494    case kAVXFloat64Abs: {
1495      // TODO(bmeurer): Use RIP relative 128-bit constants.
1496      CpuFeatureScope avx_scope(masm(), AVX);
1497      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
1498      __ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
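          // Shifting all-ones right by 1 yields 0x7FFFFFFFFFFFFFFF, which
          // clears exactly the double's sign bit in the vandpd below.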
1499      if (instr->InputAt(0)->IsDoubleRegister()) {
1500        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
1501                  i.InputDoubleRegister(0));
1502      } else {
1503        __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
1504                  i.InputOperand(0));
1505      }
1506      break;
1507    }
1508    case kAVXFloat64Neg: {
1509      // TODO(bmeurer): Use RIP relative 128-bit constants.
1510      CpuFeatureScope avx_scope(masm(), AVX);
1511      __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
1512      __ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
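          // Shifting all-ones left by 63 yields 0x8000000000000000, so the
          // vxorpd below flips exactly the double's sign bit.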
1513      if (instr->InputAt(0)->IsDoubleRegister()) {
1514        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
1515                  i.InputDoubleRegister(0));
1516      } else {
1517        __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
1518                  i.InputOperand(0));
1519      }
1520      break;
1521    }
1522    case kX64Movsxbl:
1523      ASSEMBLE_MOVX(movsxbl);
1524      __ AssertZeroExtended(i.OutputRegister());
1525      break;
1526    case kX64Movzxbl:
1527      ASSEMBLE_MOVX(movzxbl);
1528      __ AssertZeroExtended(i.OutputRegister());
1529      break;
1530    case kX64Movb: {
1531      size_t index = 0;
1532      Operand operand = i.MemoryOperand(&index);
1533      if (HasImmediateInput(instr, index)) {
1534        __ movb(operand, Immediate(i.InputInt8(index)));
1535      } else {
1536        __ movb(operand, i.InputRegister(index));
1537      }
1538      break;
1539    }
1540    case kX64Movsxwl:
1541      ASSEMBLE_MOVX(movsxwl);
1542      __ AssertZeroExtended(i.OutputRegister());
1543      break;
1544    case kX64Movzxwl:
1545      ASSEMBLE_MOVX(movzxwl);
1546      __ AssertZeroExtended(i.OutputRegister());
1547      break;
1548    case kX64Movw: {
1549      size_t index = 0;
1550      Operand operand = i.MemoryOperand(&index);
1551      if (HasImmediateInput(instr, index)) {
1552        __ movw(operand, Immediate(i.InputInt16(index)));
1553      } else {
1554        __ movw(operand, i.InputRegister(index));
1555      }
1556      break;
1557    }
1558    case kX64Movl:
1559      if (instr->HasOutput()) {
1560        if (instr->addressing_mode() == kMode_None) {
1561          if (instr->InputAt(0)->IsRegister()) {
1562            __ movl(i.OutputRegister(), i.InputRegister(0));
1563          } else {
1564            __ movl(i.OutputRegister(), i.InputOperand(0));
1565          }
1566        } else {
1567          __ movl(i.OutputRegister(), i.MemoryOperand());
1568        }
1569        __ AssertZeroExtended(i.OutputRegister());
1570      } else {
1571        size_t index = 0;
1572        Operand operand = i.MemoryOperand(&index);
1573        if (HasImmediateInput(instr, index)) {
1574          __ movl(operand, i.InputImmediate(index));
1575        } else {
1576          __ movl(operand, i.InputRegister(index));
1577        }
1578      }
1579      break;
1580    case kX64Movsxlq:
1581      ASSEMBLE_MOVX(movsxlq);
1582      break;
1583    case kX64Movq:
1584      if (instr->HasOutput()) {
1585        __ movq(i.OutputRegister(), i.MemoryOperand());
1586      } else {
1587        size_t index = 0;
1588        Operand operand = i.MemoryOperand(&index);
1589        if (HasImmediateInput(instr, index)) {
1590          __ movq(operand, i.InputImmediate(index));
1591        } else {
1592          __ movq(operand, i.InputRegister(index));
1593        }
1594      }
1595      break;
1596    case kX64Movss:
1597      if (instr->HasOutput()) {
1598        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
1599      } else {
1600        size_t index = 0;
1601        Operand operand = i.MemoryOperand(&index);
1602        __ movss(operand, i.InputDoubleRegister(index));
1603      }
1604      break;
1605    case kX64Movsd:
1606      if (instr->HasOutput()) {
1607        __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
1608      } else {
1609        size_t index = 0;
1610        Operand operand = i.MemoryOperand(&index);
1611        __ Movsd(operand, i.InputDoubleRegister(index));
1612      }
1613      break;
1614    case kX64BitcastFI:
1615      if (instr->InputAt(0)->IsDoubleStackSlot()) {
1616        __ movl(i.OutputRegister(), i.InputOperand(0));
1617      } else {
1618        __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
1619      }
1620      break;
1621    case kX64BitcastDL:
1622      if (instr->InputAt(0)->IsDoubleStackSlot()) {
1623        __ movq(i.OutputRegister(), i.InputOperand(0));
1624      } else {
1625        __ Movq(i.OutputRegister(), i.InputDoubleRegister(0));
1626      }
1627      break;
1628    case kX64BitcastIF:
1629      if (instr->InputAt(0)->IsRegister()) {
1630        __ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
1631      } else {
1632        __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
1633      }
1634      break;
1635    case kX64BitcastLD:
1636      if (instr->InputAt(0)->IsRegister()) {
1637        __ Movq(i.OutputDoubleRegister(), i.InputRegister(0));
1638      } else {
1639        __ Movsd(i.OutputDoubleRegister(), i.InputOperand(0));
1640      }
1641      break;
1642    case kX64Lea32: {
1643      AddressingMode mode = AddressingModeField::decode(instr->opcode());
1644      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
1645      // and addressing mode just happen to work out. The "addl"/"subl" forms
1646      // in these cases are faster based on measurements.
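          // For example, "leal rax, [rax + 4]" becomes "addl rax, 4", and
          // "leal rax, [rax * 4]" (kMode_M4) becomes "shll rax, 2".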
1647      if (i.InputRegister(0).is(i.OutputRegister())) {
1648        if (mode == kMode_MRI) {
1649          int32_t constant_summand = i.InputInt32(1);
1650          if (constant_summand > 0) {
1651            __ addl(i.OutputRegister(), Immediate(constant_summand));
1652          } else if (constant_summand < 0) {
1653            __ subl(i.OutputRegister(), Immediate(-constant_summand));
1654          }
1655        } else if (mode == kMode_MR1) {
1656          if (i.InputRegister(1).is(i.OutputRegister())) {
1657            __ shll(i.OutputRegister(), Immediate(1));
1658          } else {
1659            __ leal(i.OutputRegister(), i.MemoryOperand());
1660          }
1661        } else if (mode == kMode_M2) {
1662          __ shll(i.OutputRegister(), Immediate(1));
1663        } else if (mode == kMode_M4) {
1664          __ shll(i.OutputRegister(), Immediate(2));
1665        } else if (mode == kMode_M8) {
1666          __ shll(i.OutputRegister(), Immediate(3));
1667        } else {
1668          __ leal(i.OutputRegister(), i.MemoryOperand());
1669        }
1670      } else {
1671        __ leal(i.OutputRegister(), i.MemoryOperand());
1672      }
1673      __ AssertZeroExtended(i.OutputRegister());
1674      break;
1675    }
1676    case kX64Lea:
1677      __ leaq(i.OutputRegister(), i.MemoryOperand());
1678      break;
1679    case kX64Dec32:
1680      __ decl(i.OutputRegister());
1681      break;
1682    case kX64Inc32:
1683      __ incl(i.OutputRegister());
1684      break;
1685    case kX64Push:
1686      if (HasImmediateInput(instr, 0)) {
1687        __ pushq(i.InputImmediate(0));
1688        frame_access_state()->IncreaseSPDelta(1);
1689      } else {
1690        if (instr->InputAt(0)->IsRegister()) {
1691          __ pushq(i.InputRegister(0));
1692          frame_access_state()->IncreaseSPDelta(1);
1693        } else if (instr->InputAt(0)->IsDoubleRegister()) {
1694          // TODO(titzer): use another machine instruction?
1695          __ subq(rsp, Immediate(kDoubleSize));
1696          frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1697          __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
1698        } else {
1699          __ pushq(i.InputOperand(0));
1700          frame_access_state()->IncreaseSPDelta(1);
1701        }
1702      }
1703      break;
1704    case kX64Poke: {
1705      int const slot = MiscField::decode(instr->opcode());
1706      if (HasImmediateInput(instr, 0)) {
1707        __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
1708      } else {
1709        __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0));
1710      }
1711      break;
1712    }
1713    case kCheckedLoadInt8:
1714      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
1715      break;
1716    case kCheckedLoadUint8:
1717      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
1718      break;
1719    case kCheckedLoadInt16:
1720      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
1721      break;
1722    case kCheckedLoadUint16:
1723      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
1724      break;
1725    case kCheckedLoadWord32:
1726      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
1727      break;
1728    case kCheckedLoadWord64:
1729      ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
1730      break;
1731    case kCheckedLoadFloat32:
1732      ASSEMBLE_CHECKED_LOAD_FLOAT(Movss);
1733      break;
1734    case kCheckedLoadFloat64:
1735      ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd);
1736      break;
1737    case kCheckedStoreWord8:
1738      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
1739      break;
1740    case kCheckedStoreWord16:
1741      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
1742      break;
1743    case kCheckedStoreWord32:
1744      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
1745      break;
1746    case kCheckedStoreWord64:
1747      ASSEMBLE_CHECKED_STORE_INTEGER(movq);
1748      break;
1749    case kCheckedStoreFloat32:
1750      ASSEMBLE_CHECKED_STORE_FLOAT(Movss);
1751      break;
1752    case kCheckedStoreFloat64:
1753      ASSEMBLE_CHECKED_STORE_FLOAT(Movsd);
1754      break;
1755    case kX64StackCheck:
1756      __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
1757      break;
1758  }
1759}  // NOLINT(readability/fn_size)
1760
1761
1762// Assembles branches after this instruction.
1763void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
1764  X64OperandConverter i(this, instr);
1765  Label::Distance flabel_distance =
1766      branch->fallthru ? Label::kNear : Label::kFar;
1767  Label* tlabel = branch->true_label;
1768  Label* flabel = branch->false_label;
1769  switch (branch->condition) {
1770    case kUnorderedEqual:
1771      __ j(parity_even, flabel, flabel_distance);
1772    // Fall through.
1773    case kEqual:
1774      __ j(equal, tlabel);
1775      break;
1776    case kUnorderedNotEqual:
1777      __ j(parity_even, tlabel);
1778    // Fall through.
1779    case kNotEqual:
1780      __ j(not_equal, tlabel);
1781      break;
1782    case kSignedLessThan:
1783      __ j(less, tlabel);
1784      break;
1785    case kSignedGreaterThanOrEqual:
1786      __ j(greater_equal, tlabel);
1787      break;
1788    case kSignedLessThanOrEqual:
1789      __ j(less_equal, tlabel);
1790      break;
1791    case kSignedGreaterThan:
1792      __ j(greater, tlabel);
1793      break;
1794    case kUnsignedLessThan:
1795      __ j(below, tlabel);
1796      break;
1797    case kUnsignedGreaterThanOrEqual:
1798      __ j(above_equal, tlabel);
1799      break;
1800    case kUnsignedLessThanOrEqual:
1801      __ j(below_equal, tlabel);
1802      break;
1803    case kUnsignedGreaterThan:
1804      __ j(above, tlabel);
1805      break;
1806    case kOverflow:
1807      __ j(overflow, tlabel);
1808      break;
1809    case kNotOverflow:
1810      __ j(no_overflow, tlabel);
1811      break;
1812    default:
1813      UNREACHABLE();
1814      break;
1815  }
1816  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
1817}
1818
1819
1820void CodeGenerator::AssembleArchJump(RpoNumber target) {
1821  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
1822}
1823
1824
1825// Assembles boolean materializations after this instruction.
1826void CodeGenerator::AssembleArchBoolean(Instruction* instr,
1827                                        FlagsCondition condition) {
1828  X64OperandConverter i(this, instr);
1829  Label done;
1830
1831  // Materialize a full 64-bit 1 or 0 value. The result register is always the
1832  // last output of the instruction.
1833  Label check;
1834  DCHECK_NE(0u, instr->OutputCount());
1835  Register reg = i.OutputRegister(instr->OutputCount() - 1);
1836  Condition cc = no_condition;
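      // For the unordered conditions below, parity_odd means neither
      // operand was NaN, so the regular setcc path applies; on NaN the
      // result is fixed (0 for unordered-equal, 1 for unordered-not-equal)
      // and is materialized directly before jumping to done.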
1837  switch (condition) {
1838    case kUnorderedEqual:
1839      __ j(parity_odd, &check, Label::kNear);
1840      __ movl(reg, Immediate(0));
1841      __ jmp(&done, Label::kNear);
1842    // Fall through.
1843    case kEqual:
1844      cc = equal;
1845      break;
1846    case kUnorderedNotEqual:
1847      __ j(parity_odd, &check, Label::kNear);
1848      __ movl(reg, Immediate(1));
1849      __ jmp(&done, Label::kNear);
1850    // Fall through.
1851    case kNotEqual:
1852      cc = not_equal;
1853      break;
1854    case kSignedLessThan:
1855      cc = less;
1856      break;
1857    case kSignedGreaterThanOrEqual:
1858      cc = greater_equal;
1859      break;
1860    case kSignedLessThanOrEqual:
1861      cc = less_equal;
1862      break;
1863    case kSignedGreaterThan:
1864      cc = greater;
1865      break;
1866    case kUnsignedLessThan:
1867      cc = below;
1868      break;
1869    case kUnsignedGreaterThanOrEqual:
1870      cc = above_equal;
1871      break;
1872    case kUnsignedLessThanOrEqual:
1873      cc = below_equal;
1874      break;
1875    case kUnsignedGreaterThan:
1876      cc = above;
1877      break;
1878    case kOverflow:
1879      cc = overflow;
1880      break;
1881    case kNotOverflow:
1882      cc = no_overflow;
1883      break;
1884    default:
1885      UNREACHABLE();
1886      break;
1887  }
1888  __ bind(&check);
1889  __ setcc(cc, reg);
1890  __ movzxbl(reg, reg);
1891  __ bind(&done);
1892}
1893
1894
1895void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
1896  X64OperandConverter i(this, instr);
1897  Register input = i.InputRegister(0);
1898  for (size_t index = 2; index < instr->InputCount(); index += 2) {
1899    __ cmpl(input, Immediate(i.InputInt32(index + 0)));
1900    __ j(equal, GetLabel(i.InputRpo(index + 1)));
1901  }
1902  AssembleArchJump(i.InputRpo(1));
1903}
1904
1905
1906void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
1907  X64OperandConverter i(this, instr);
1908  Register input = i.InputRegister(0);
1909  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
1910  Label** cases = zone()->NewArray<Label*>(case_count);
1911  for (int32_t index = 0; index < case_count; ++index) {
1912    cases[index] = GetLabel(i.InputRpo(index + 2));
1913  }
1914  Label* const table = AddJumpTable(cases, case_count);
1915  __ cmpl(input, Immediate(case_count));
1916  __ j(above_equal, GetLabel(i.InputRpo(1)));
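      // The unsigned above_equal check also routes negative inputs to the
      // default case. Each table entry is a 64-bit absolute label address
      // emitted by AssembleJumpTable, hence the times_8 scaling in the
      // indirect jump.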
1917  __ leaq(kScratchRegister, Operand(table));
1918  __ jmp(Operand(kScratchRegister, input, times_8, 0));
1919}
1920
1921
1922void CodeGenerator::AssembleDeoptimizerCall(
1923    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
1924  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
1925      isolate(), deoptimization_id, bailout_type);
1926  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
1927}
1928
1929
1930namespace {
1931
1932static const int kQuadWordSize = 16;
1933
1934}  // namespace
1935
1936
1937void CodeGenerator::AssemblePrologue() {
1938  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
1939  if (frame_access_state()->has_frame()) {
1940    if (descriptor->IsCFunctionCall()) {
1941      __ pushq(rbp);
1942      __ movq(rbp, rsp);
1943    } else if (descriptor->IsJSFunctionCall()) {
1944      __ Prologue(this->info()->GeneratePreagedPrologue());
1945    } else {
1946      __ StubPrologue(info()->GetOutputStackFrameType());
1947    }
1948  }
1949  int stack_shrink_slots = frame()->GetSpillSlotCount();
1950  if (info()->is_osr()) {
1951    // TurboFan OSR-compiled functions cannot be entered directly.
1952    __ Abort(kShouldNotDirectlyEnterOsrFunction);
1953
1954    // Unoptimized code jumps directly to this entrypoint while the unoptimized
1955    // frame is still on the stack. Optimized code uses OSR values directly from
1956    // the unoptimized frame. Thus, all that needs to be done is to allocate the
1957    // remaining stack slots.
1958    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
1959    osr_pc_offset_ = __ pc_offset();
1960    stack_shrink_slots -=
1961        static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
1962  }
1963
1964  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
1965  if (saves_fp != 0) {
1966    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
1967  }
1968  if (stack_shrink_slots > 0) {
1969    __ subq(rsp, Immediate(stack_shrink_slots * kPointerSize));
1970  }
1971
1972  if (saves_fp != 0) {  // Save callee-saved XMM registers.
1973    const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
1974    const int stack_size = saves_fp_count * kQuadWordSize;
1975    // Adjust the stack pointer.
1976    __ subp(rsp, Immediate(stack_size));
1977    // Store the registers on the stack.
1978    int slot_idx = 0;
1979    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
1980      if (!((1 << i) & saves_fp)) continue;
1981      __ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
1982                XMMRegister::from_code(i));
1983      slot_idx++;
1984    }
1985    frame()->AllocateSavedCalleeRegisterSlots(saves_fp_count *
1986                                              (kQuadWordSize / kPointerSize));
1987  }
1988
1989  const RegList saves = descriptor->CalleeSavedRegisters();
1990  if (saves != 0) {  // Save callee-saved registers.
1991    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
1992      if (!((1 << i) & saves)) continue;
1993      __ pushq(Register::from_code(i));
1994      frame()->AllocateSavedCalleeRegisterSlots(1);
1995    }
1996  }
1997}
1998
1999
2000void CodeGenerator::AssembleReturn() {
2001  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
2002
2003  // Restore registers.
2004  const RegList saves = descriptor->CalleeSavedRegisters();
2005  if (saves != 0) {
2006    for (int i = 0; i < Register::kNumRegisters; i++) {
2007      if (!((1 << i) & saves)) continue;
2008      __ popq(Register::from_code(i));
2009    }
2010  }
2011  const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
2012  if (saves_fp != 0) {
2013    const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
2014    const int stack_size = saves_fp_count * kQuadWordSize;
2015    // Load the registers from the stack.
2016    int slot_idx = 0;
2017    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
2018      if (!((1 << i) & saves_fp)) continue;
2019      __ movdqu(XMMRegister::from_code(i),
2020                Operand(rsp, kQuadWordSize * slot_idx));
2021      slot_idx++;
2022    }
2023    // Adjust the stack pointer.
2024    __ addp(rsp, Immediate(stack_size));
2025  }
2026
2027  if (descriptor->IsCFunctionCall()) {
2028    AssembleDeconstructFrame();
2029  } else if (frame_access_state()->has_frame()) {
2030    // Canonicalize JSFunction return sites for now.
2031    if (return_label_.is_bound()) {
2032      __ jmp(&return_label_);
2033      return;
2034    } else {
2035      __ bind(&return_label_);
2036      AssembleDeconstructFrame();
2037    }
2038  }
2039  size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
2040  // Might need rcx for scratch if pop_size is too big.
2041  DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
2042  __ Ret(static_cast<int>(pop_size), rcx);
2043}
2044
2045
2046void CodeGenerator::AssembleMove(InstructionOperand* source,
2047                                 InstructionOperand* destination) {
2048  X64OperandConverter g(this, nullptr);
2049  // Dispatch on the source and destination operand kinds.  Not all
2050  // combinations are possible.
2051  if (source->IsRegister()) {
2052    DCHECK(destination->IsRegister() || destination->IsStackSlot());
2053    Register src = g.ToRegister(source);
2054    if (destination->IsRegister()) {
2055      __ movq(g.ToRegister(destination), src);
2056    } else {
2057      __ movq(g.ToOperand(destination), src);
2058    }
2059  } else if (source->IsStackSlot()) {
2060    DCHECK(destination->IsRegister() || destination->IsStackSlot());
2061    Operand src = g.ToOperand(source);
2062    if (destination->IsRegister()) {
2063      Register dst = g.ToRegister(destination);
2064      __ movq(dst, src);
2065    } else {
2066      // Spill on demand to use a temporary register for memory-to-memory
2067      // moves.
2068      Register tmp = kScratchRegister;
2069      Operand dst = g.ToOperand(destination);
2070      __ movq(tmp, src);
2071      __ movq(dst, tmp);
2072    }
2073  } else if (source->IsConstant()) {
2074    ConstantOperand* constant_source = ConstantOperand::cast(source);
2075    Constant src = g.ToConstant(constant_source);
2076    if (destination->IsRegister() || destination->IsStackSlot()) {
2077      Register dst = destination->IsRegister() ? g.ToRegister(destination)
2078                                               : kScratchRegister;
2079      switch (src.type()) {
2080        case Constant::kInt32:
2081          // TODO(dcarney): don't need scratch in this case.
2082          __ Set(dst, src.ToInt32());
2083          break;
2084        case Constant::kInt64:
2085          __ Set(dst, src.ToInt64());
2086          break;
2087        case Constant::kFloat32:
2088          __ Move(dst,
2089                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
2090          break;
2091        case Constant::kFloat64:
2092          __ Move(dst,
2093                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
2094          break;
2095        case Constant::kExternalReference:
2096          __ Move(dst, src.ToExternalReference());
2097          break;
2098        case Constant::kHeapObject: {
2099          Handle<HeapObject> src_object = src.ToHeapObject();
2100          Heap::RootListIndex index;
2101          int slot;
2102          if (IsMaterializableFromFrame(src_object, &slot)) {
2103            __ movp(dst, g.SlotToOperand(slot));
2104          } else if (IsMaterializableFromRoot(src_object, &index)) {
2105            __ LoadRoot(dst, index);
2106          } else {
2107            __ Move(dst, src_object);
2108          }
2109          break;
2110        }
2111        case Constant::kRpoNumber:
2112          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
2113          break;
2114      }
2115      if (destination->IsStackSlot()) {
2116        __ movq(g.ToOperand(destination), kScratchRegister);
2117      }
2118    } else if (src.type() == Constant::kFloat32) {
2119      // TODO(turbofan): Can we do better here?
2120      uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
2121      if (destination->IsDoubleRegister()) {
2122        __ Move(g.ToDoubleRegister(destination), src_const);
2123      } else {
2124        DCHECK(destination->IsDoubleStackSlot());
2125        Operand dst = g.ToOperand(destination);
2126        __ movl(dst, Immediate(src_const));
2127      }
2128    } else {
2129      DCHECK_EQ(Constant::kFloat64, src.type());
2130      uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
2131      if (destination->IsDoubleRegister()) {
2132        __ Move(g.ToDoubleRegister(destination), src_const);
2133      } else {
2134        DCHECK(destination->IsDoubleStackSlot());
2135        __ movq(kScratchRegister, src_const);
2136        __ movq(g.ToOperand(destination), kScratchRegister);
2137      }
2138    }
2139  } else if (source->IsDoubleRegister()) {
2140    XMMRegister src = g.ToDoubleRegister(source);
2141    if (destination->IsDoubleRegister()) {
2142      XMMRegister dst = g.ToDoubleRegister(destination);
2143      __ Movapd(dst, src);
2144    } else {
2145      DCHECK(destination->IsDoubleStackSlot());
2146      Operand dst = g.ToOperand(destination);
2147      __ Movsd(dst, src);
2148    }
2149  } else if (source->IsDoubleStackSlot()) {
2150    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
2151    Operand src = g.ToOperand(source);
2152    if (destination->IsDoubleRegister()) {
2153      XMMRegister dst = g.ToDoubleRegister(destination);
2154      __ Movsd(dst, src);
2155    } else {
2156      // We rely on having xmm0 available as a fixed scratch register.
2157      Operand dst = g.ToOperand(destination);
2158      __ Movsd(xmm0, src);
2159      __ Movsd(dst, xmm0);
2160    }
2161  } else {
2162    UNREACHABLE();
2163  }
2164}
2165
2166
2167void CodeGenerator::AssembleSwap(InstructionOperand* source,
2168                                 InstructionOperand* destination) {
2169  X64OperandConverter g(this, nullptr);
2170  // Dispatch on the source and destination operand kinds.  Not all
2171  // combinations are possible.
2172  if (source->IsRegister() && destination->IsRegister()) {
2173    // Register-register.
2174    Register src = g.ToRegister(source);
2175    Register dst = g.ToRegister(destination);
2176    __ movq(kScratchRegister, src);
2177    __ movq(src, dst);
2178    __ movq(dst, kScratchRegister);
2179  } else if (source->IsRegister() && destination->IsStackSlot()) {
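        // Register <-> stack-slot swap via the stack: push saves the
        // register, movq loads the slot into the register, and popq stores
        // the saved value into the slot. The SP-delta bookkeeping keeps
        // rsp-relative slot operands valid while rsp is temporarily shifted.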
2180    Register src = g.ToRegister(source);
2181    __ pushq(src);
2182    frame_access_state()->IncreaseSPDelta(1);
2183    Operand dst = g.ToOperand(destination);
2184    __ movq(src, dst);
2185    frame_access_state()->IncreaseSPDelta(-1);
2186    dst = g.ToOperand(destination);
2187    __ popq(dst);
2188  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
2189             (source->IsDoubleStackSlot() &&
2190              destination->IsDoubleStackSlot())) {
2191    // Memory-memory.
2192    Register tmp = kScratchRegister;
2193    Operand src = g.ToOperand(source);
2194    Operand dst = g.ToOperand(destination);
2195    __ movq(tmp, dst);
2196    __ pushq(src);
2197    frame_access_state()->IncreaseSPDelta(1);
2198    src = g.ToOperand(source);
2199    __ movq(src, tmp);
2200    frame_access_state()->IncreaseSPDelta(-1);
2201    dst = g.ToOperand(destination);
2202    __ popq(dst);
2203  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
2204    // XMM register-register swap. We rely on having xmm0
2205    // available as a fixed scratch register.
2206    XMMRegister src = g.ToDoubleRegister(source);
2207    XMMRegister dst = g.ToDoubleRegister(destination);
2208    __ Movapd(xmm0, src);
2209    __ Movapd(src, dst);
2210    __ Movapd(dst, xmm0);
2211  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
2212    // XMM register-memory swap.  We rely on having xmm0
2213    // available as a fixed scratch register.
2214    XMMRegister src = g.ToDoubleRegister(source);
2215    Operand dst = g.ToOperand(destination);
2216    __ Movsd(xmm0, src);
2217    __ Movsd(src, dst);
2218    __ Movsd(dst, xmm0);
2219  } else {
2220    // No other combinations are possible.
2221    UNREACHABLE();
2222  }
2223}
2224
2225
2226void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
2227  for (size_t index = 0; index < target_count; ++index) {
2228    __ dq(targets[index]);
2229  }
2230}
2231
2232
2233void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
2234
2235
2236void CodeGenerator::EnsureSpaceForLazyDeopt() {
2237  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
2238    return;
2239  }
2240
2241  int space_needed = Deoptimizer::patch_size();
2242  // Ensure that we have enough space after the previous lazy-bailout
2243  // instruction for patching the code here.
2244  int current_pc = masm()->pc_offset();
2245  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
2246    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
2247    __ Nop(padding_size);
2248  }
2249}
2250
2251#undef __
2252
2253}  // namespace compiler
2254}  // namespace internal
2255}  // namespace v8
2256