// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5#include "src/compiler/code-generator.h"
6
7#include "src/compilation-info.h"
8#include "src/compiler/code-generator-impl.h"
9#include "src/compiler/gap-resolver.h"
10#include "src/compiler/node-matchers.h"
11#include "src/compiler/osr.h"
12#include "src/s390/macro-assembler-s390.h"
13
14namespace v8 {
15namespace internal {
16namespace compiler {
17
18#define __ masm()->
19
20#define kScratchReg ip
21
22// Adds S390-specific methods to convert InstructionOperands.
23class S390OperandConverter final : public InstructionOperandConverter {
24 public:
25  S390OperandConverter(CodeGenerator* gen, Instruction* instr)
26      : InstructionOperandConverter(gen, instr) {}
27
28  size_t OutputCount() { return instr_->OutputCount(); }
29
30  bool Is64BitOperand(int index) {
31    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
32           MachineRepresentation::kWord64;
33  }
34
35  bool Is32BitOperand(int index) {
36    return LocationOperand::cast(instr_->InputAt(index))->representation() ==
37           MachineRepresentation::kWord32;
38  }
39
40  bool CompareLogical() const {
41    switch (instr_->flags_condition()) {
42      case kUnsignedLessThan:
43      case kUnsignedGreaterThanOrEqual:
44      case kUnsignedLessThanOrEqual:
45      case kUnsignedGreaterThan:
46        return true;
47      default:
48        return false;
49    }
50    UNREACHABLE();
51    return false;
52  }
53
54  Operand InputImmediate(size_t index) {
55    Constant constant = ToConstant(instr_->InputAt(index));
56    switch (constant.type()) {
57      case Constant::kInt32:
58        return Operand(constant.ToInt32());
59      case Constant::kFloat32:
60        return Operand(
61            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
62      case Constant::kFloat64:
63        return Operand(
64            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
65      case Constant::kInt64:
66#if V8_TARGET_ARCH_S390X
67        return Operand(constant.ToInt64());
68#endif
69      case Constant::kExternalReference:
70      case Constant::kHeapObject:
71      case Constant::kRpoNumber:
72        break;
73    }
74    UNREACHABLE();
75    return Operand::Zero();
76  }
77
78  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
79    const size_t index = *first_index;
80    if (mode) *mode = AddressingModeField::decode(instr_->opcode());
81    switch (AddressingModeField::decode(instr_->opcode())) {
82      case kMode_None:
83        break;
84      case kMode_MR:
85        *first_index += 1;
86        return MemOperand(InputRegister(index + 0), 0);
87      case kMode_MRI:
88        *first_index += 2;
89        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
90      case kMode_MRR:
91        *first_index += 2;
92        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
93      case kMode_MRRI:
94        *first_index += 3;
95        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
96                          InputInt32(index + 2));
97    }
98    UNREACHABLE();
99    return MemOperand(r0);
100  }
101
102  MemOperand MemoryOperand(AddressingMode* mode = NULL,
103                           size_t first_index = 0) {
104    return MemoryOperand(mode, &first_index);
105  }
106
107  MemOperand ToMemOperand(InstructionOperand* op) const {
108    DCHECK_NOT_NULL(op);
109    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
110    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
111  }
112
113  MemOperand SlotToMemOperand(int slot) const {
114    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
115    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
116  }
117
118  MemOperand InputStackSlot(size_t index) {
119    InstructionOperand* op = instr_->InputAt(index);
120    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
121  }
122
123  MemOperand InputStackSlot32(size_t index) {
124#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
125    // We want to read the 32-bits directly from memory
126    MemOperand mem = InputStackSlot(index);
127    return MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
128#else
129    return InputStackSlot(index);
130#endif
131  }
132};
133
134static inline bool HasRegisterOutput(Instruction* instr, int index = 0) {
135  return instr->OutputCount() > 0 && instr->OutputAt(index)->IsRegister();
136}
137
138static inline bool HasRegisterInput(Instruction* instr, int index) {
139  return instr->InputAt(index)->IsRegister();
140}
141
142static inline bool HasFPRegisterInput(Instruction* instr, int index) {
143  return instr->InputAt(index)->IsFPRegister();
144}
145
146static inline bool HasImmediateInput(Instruction* instr, size_t index) {
147  return instr->InputAt(index)->IsImmediate();
148}
149
150static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
151  return instr->InputAt(index)->IsStackSlot();
152}
153
154static inline bool HasFPStackSlotInput(Instruction* instr, size_t index) {
155  return instr->InputAt(index)->IsFPStackSlot();
156}
157
158namespace {
159
160class OutOfLineLoadNAN32 final : public OutOfLineCode {
161 public:
162  OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
163      : OutOfLineCode(gen), result_(result) {}
164
165  void Generate() final {
166    __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
167                         kScratchReg);
168  }
169
170 private:
171  DoubleRegister const result_;
172};
173
174class OutOfLineLoadNAN64 final : public OutOfLineCode {
175 public:
176  OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
177      : OutOfLineCode(gen), result_(result) {}
178
179  void Generate() final {
180    __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
181                         kScratchReg);
182  }
183
184 private:
185  DoubleRegister const result_;
186};
187
188class OutOfLineLoadZero final : public OutOfLineCode {
189 public:
190  OutOfLineLoadZero(CodeGenerator* gen, Register result)
191      : OutOfLineCode(gen), result_(result) {}
192
193  void Generate() final { __ LoadImmP(result_, Operand::Zero()); }
194
195 private:
196  Register const result_;
197};
198
// Out-of-line portion of a write barrier: calls RecordWriteStub to update GC
// bookkeeping after a pointer store into |object| at |offset| (the offset is
// either a register or an immediate displacement, one per constructor).
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  // Register-offset variant; offset_immediate_ is 0 in this case.
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        offset_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  // Immediate-offset variant; offset_ is no_reg in this case.
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(no_reg),
        offset_immediate_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      // Smis carry no pointer; no barrier work needed.
      __ JumpIfSmi(value_, exit());
    }
    // Skip the stub call if the value's page is not interesting to the GC.
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Push(r14);
    }
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    // The stub expects the slot address in scratch1_.
    if (offset_.is(no_reg)) {
      __ AddP(scratch1_, object_, Operand(offset_immediate_));
    } else {
      DCHECK_EQ(0, offset_immediate_);
      __ AddP(scratch1_, object_, offset_);
    }
    __ CallStub(&stub);
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Pop(r14);
    }
  }

 private:
  Register const object_;
  Register const offset_;
  int32_t const offset_immediate_;  // Valid if offset_.is(no_reg).
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;  // True when the frame was elided (r14 is live).
};
268
// Maps a compiler FlagsCondition (plus the opcode that set the condition
// code) to an S390 branch Condition. Load-and-test opcodes compare against
// zero, so unsigned conditions collapse to constants there (e.g. an unsigned
// value is never < 0); otherwise unsigned conditions share the signed codes
// because the compare instruction itself was the logical form.
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kUnsignedLessThan:
      // unsigned number never less than 0
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_NOP;
    // fall through
    case kSignedLessThan:
      return lt;
    case kUnsignedGreaterThanOrEqual:
      // unsigned number always greater than or equal 0
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_ALWAYS;
    // fall through
    case kSignedGreaterThanOrEqual:
      return ge;
    case kUnsignedLessThanOrEqual:
      // unsigned number never less than 0
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return CC_EQ;
    // fall through
    case kSignedLessThanOrEqual:
      return le;
    case kUnsignedGreaterThan:
      // unsigned number always greater than or equal 0
      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
        return ne;
    // fall through
    case kSignedGreaterThan:
      return gt;
    case kOverflow:
      // Overflow checked for AddP/SubP only.
      switch (op) {
        case kS390_Add32:
        case kS390_Add64:
        case kS390_Sub32:
        case kS390_Sub64:
          return overflow;
        default:
          break;
      }
      break;
    case kNotOverflow:
      switch (op) {
        case kS390_Add32:
        case kS390_Add64:
        case kS390_Sub32:
        case kS390_Sub64:
          return nooverflow;
        default:
          break;
      }
      break;
    default:
      break;
  }
  // Reached for condition/opcode combinations with no S390 mapping.
  UNREACHABLE();
  return kNoCondition;
}
332
// Member-function-pointer shorthands for the MacroAssembler emitters, named
// after the operand formats they take: R = register, M = memory operand,
// I = immediate.
typedef void (MacroAssembler::*RRTypeInstr)(Register, Register);
typedef void (MacroAssembler::*RMTypeInstr)(Register, const MemOperand&);
typedef void (MacroAssembler::*RITypeInstr)(Register, const Operand&);
typedef void (MacroAssembler::*RRRTypeInstr)(Register, Register, Register);
typedef void (MacroAssembler::*RRMTypeInstr)(Register, Register,
                                             const MemOperand&);
typedef void (MacroAssembler::*RRITypeInstr)(Register, Register,
                                             const Operand&);

// The immediate at input |num| flags whether the (32-bit) result must be
// zero-extended to 64 bits; if so, reload it with LoadlW.
#define CHECK_AND_ZERO_EXT_OUTPUT(num)                                   \
  {                                                                      \
    CHECK(HasImmediateInput(instr, (num)));                              \
    int doZeroExt = i.InputInt32(num);                                   \
    if (doZeroExt) masm->LoadlW(i.OutputRegister(), i.OutputRegister()); \
  }
348
349void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
350                   Instruction* instr, RRTypeInstr rr_instr,
351                   RMTypeInstr rm_instr, RITypeInstr ri_instr) {
352  CHECK(i.OutputRegister().is(i.InputRegister(0)));
353  AddressingMode mode = AddressingModeField::decode(instr->opcode());
354  int zeroExtIndex = 2;
355  if (mode != kMode_None) {
356    size_t first_index = 1;
357    MemOperand operand = i.MemoryOperand(&mode, &first_index);
358    zeroExtIndex = first_index;
359    CHECK(rm_instr != NULL);
360    (masm->*rm_instr)(i.OutputRegister(), operand);
361  } else if (HasRegisterInput(instr, 1)) {
362    (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
363  } else if (HasImmediateInput(instr, 1)) {
364    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
365  } else if (HasStackSlotInput(instr, 1)) {
366    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
367  } else {
368    UNREACHABLE();
369  }
370  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
371}
372
373void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
374                   Instruction* instr, RRRTypeInstr rrr_instr,
375                   RMTypeInstr rm_instr, RITypeInstr ri_instr) {
376  AddressingMode mode = AddressingModeField::decode(instr->opcode());
377  int zeroExtIndex = 2;
378  if (mode != kMode_None) {
379    CHECK(i.OutputRegister().is(i.InputRegister(0)));
380    size_t first_index = 1;
381    MemOperand operand = i.MemoryOperand(&mode, &first_index);
382    zeroExtIndex = first_index;
383    CHECK(rm_instr != NULL);
384    (masm->*rm_instr)(i.OutputRegister(), operand);
385  } else if (HasRegisterInput(instr, 1)) {
386    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
387                       i.InputRegister(1));
388  } else if (HasImmediateInput(instr, 1)) {
389    CHECK(i.OutputRegister().is(i.InputRegister(0)));
390    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
391  } else if (HasStackSlotInput(instr, 1)) {
392    CHECK(i.OutputRegister().is(i.InputRegister(0)));
393    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
394  } else {
395    UNREACHABLE();
396  }
397  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
398}
399
400void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
401                   Instruction* instr, RRRTypeInstr rrr_instr,
402                   RMTypeInstr rm_instr, RRITypeInstr rri_instr) {
403  AddressingMode mode = AddressingModeField::decode(instr->opcode());
404  int zeroExtIndex = 2;
405  if (mode != kMode_None) {
406    CHECK(i.OutputRegister().is(i.InputRegister(0)));
407    size_t first_index = 1;
408    MemOperand operand = i.MemoryOperand(&mode, &first_index);
409    zeroExtIndex = first_index;
410    CHECK(rm_instr != NULL);
411    (masm->*rm_instr)(i.OutputRegister(), operand);
412  } else if (HasRegisterInput(instr, 1)) {
413    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
414                       i.InputRegister(1));
415  } else if (HasImmediateInput(instr, 1)) {
416    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
417                       i.InputImmediate(1));
418  } else if (HasStackSlotInput(instr, 1)) {
419    CHECK(i.OutputRegister().is(i.InputRegister(0)));
420    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
421  } else {
422    UNREACHABLE();
423  }
424  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
425}
426
427void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
428                   Instruction* instr, RRRTypeInstr rrr_instr,
429                   RRMTypeInstr rrm_instr, RRITypeInstr rri_instr) {
430  AddressingMode mode = AddressingModeField::decode(instr->opcode());
431  int zeroExtIndex = 2;
432  if (mode != kMode_None) {
433    size_t first_index = 1;
434    MemOperand operand = i.MemoryOperand(&mode, &first_index);
435    zeroExtIndex = first_index;
436    CHECK(rrm_instr != NULL);
437    (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0), operand);
438  } else if (HasRegisterInput(instr, 1)) {
439    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
440                       i.InputRegister(1));
441  } else if (HasImmediateInput(instr, 1)) {
442    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
443                       i.InputImmediate(1));
444  } else if (HasStackSlotInput(instr, 1)) {
445    (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0),
446                       i.InputStackSlot32(1));
447  } else {
448    UNREACHABLE();
449  }
450  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
451}
452
453void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
454                   Instruction* instr, RRRTypeInstr rrr_instr,
455                   RRITypeInstr rri_instr) {
456  AddressingMode mode = AddressingModeField::decode(instr->opcode());
457  CHECK(mode == kMode_None);
458  int zeroExtIndex = 2;
459  if (HasRegisterInput(instr, 1)) {
460    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
461                       i.InputRegister(1));
462  } else if (HasImmediateInput(instr, 1)) {
463    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
464                       i.InputImmediate(1));
465  } else {
466    UNREACHABLE();
467  }
468  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
469}
470
471void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
472                   Instruction* instr, RRTypeInstr rr_instr,
473                   RITypeInstr ri_instr) {
474  AddressingMode mode = AddressingModeField::decode(instr->opcode());
475  CHECK(mode == kMode_None);
476  CHECK(i.OutputRegister().is(i.InputRegister(0)));
477  int zeroExtIndex = 2;
478  if (HasRegisterInput(instr, 1)) {
479    (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
480  } else if (HasImmediateInput(instr, 1)) {
481    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
482  } else {
483    UNREACHABLE();
484  }
485  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
486}
487
// Binds three MacroAssembler emitters and dispatches to the matching
// AssembleBinOp overload above (overload resolution picks by pointer types).
#define ASSEMBLE_BIN_OP(instr1, instr2, instr3)            \
  AssembleBinOp(i, masm(), instr, &MacroAssembler::instr1, \
                &MacroAssembler::instr2, &MacroAssembler::instr3)

#undef CHECK_AND_ZERO_EXT_OUTPUT
493
494}  // namespace
495
// Same contract as the namespace-local version above, but usable where the
// "__" (masm()) shorthand is in scope: the immediate at input |num| flags
// whether the 32-bit result must be zero-extended via LoadlW.
#define CHECK_AND_ZERO_EXT_OUTPUT(num)                                \
  {                                                                   \
    CHECK(HasImmediateInput(instr, (num)));                           \
    int doZeroExt = i.InputInt32(num);                                \
    if (doZeroExt) __ LoadlW(i.OutputRegister(), i.OutputRegister()); \
  }
502
// FP unary op: output double register <- asm_instr(input double register 0).
#define ASSEMBLE_FLOAT_UNOP(asm_instr)                                \
  do {                                                                \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
  } while (0)
507
// FP binary op over double registers 0 and 1 into the output register.
#define ASSEMBLE_FLOAT_BINOP(asm_instr)                              \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1));                          \
  } while (0)
513
// GP binary op: second operand may be a register or an immediate; any other
// operand kind is UNIMPLEMENTED.
#define ASSEMBLE_BINOP(asm_instr)                          \
  do {                                                     \
    if (HasRegisterInput(instr, 1)) {                      \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
                   i.InputRegister(1));                    \
    } else if (HasImmediateInput(instr, 1)) {              \
      __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
                   i.InputImmediate(1));                   \
    } else {                                               \
      UNIMPLEMENTED();                                     \
    }                                                      \
  } while (0)
526
// Compares input 0 against operand 1 (memory operand, register, immediate,
// or stack slot), choosing cmpl_instr (logical/unsigned) vs cmp_instr
// (signed) from the instruction's flags condition.
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                         \
  do {                                                                  \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) {                                           \
      size_t first_index = 1;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), operand);                     \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), operand);                      \
      }                                                                 \
    } else if (HasRegisterInput(instr, 1)) {                            \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));          \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));           \
      }                                                                 \
    } else if (HasImmediateInput(instr, 1)) {                           \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1));         \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));          \
      }                                                                 \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 1));                              \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot(1));         \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputStackSlot(1));          \
      }                                                                 \
    }                                                                   \
  } while (0)
559
// 32-bit variant of ASSEMBLE_COMPARE: identical dispatch, but stack-slot
// operands are read through InputStackSlot32 (the 32-bit half of the slot).
#define ASSEMBLE_COMPARE32(cmp_instr, cmpl_instr)                       \
  do {                                                                  \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    if (mode != kMode_None) {                                           \
      size_t first_index = 1;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), operand);                     \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), operand);                      \
      }                                                                 \
    } else if (HasRegisterInput(instr, 1)) {                            \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));          \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));           \
      }                                                                 \
    } else if (HasImmediateInput(instr, 1)) {                           \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1));         \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));          \
      }                                                                 \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 1));                              \
      if (i.CompareLogical()) {                                         \
        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot32(1));       \
      } else {                                                          \
        __ cmp_instr(i.InputRegister(0), i.InputStackSlot32(1));        \
      }                                                                 \
    }                                                                   \
  } while (0)
592
// FP compare of double register 0 against operand 1. A stack slot with a
// negative offset is first loaded into kScratchDoubleReg — presumably
// because the RM-form compare cannot encode a negative displacement
// (NOTE(review): confirm against the emitter's displacement range).
#define ASSEMBLE_FLOAT_COMPARE(cmp_rr_instr, cmp_rm_instr, load_instr)     \
  do {                                                                     \
    AddressingMode mode = AddressingModeField::decode(instr->opcode());    \
    if (mode != kMode_None) {                                              \
      size_t first_index = 1;                                              \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);           \
      __ cmp_rm_instr(i.InputDoubleRegister(0), operand);                  \
    } else if (HasFPRegisterInput(instr, 1)) {                             \
      __ cmp_rr_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    } else {                                                               \
      USE(HasFPStackSlotInput);                                            \
      DCHECK(HasFPStackSlotInput(instr, 1));                               \
      MemOperand operand = i.InputStackSlot(1);                            \
      if (operand.offset() >= 0) {                                         \
        __ cmp_rm_instr(i.InputDoubleRegister(0), operand);                \
      } else {                                                             \
        __ load_instr(kScratchDoubleReg, operand);                         \
        __ cmp_rr_instr(i.InputDoubleRegister(0), kScratchDoubleReg);      \
      }                                                                    \
    }                                                                      \
  } while (0)
614
// Divide instruction dr will implicitly use register pair
// r0 & r1 below.
// R0:R1 = R1 / divisor - R0 remainder
// Copy remainder to output reg
#define ASSEMBLE_MODULO(div_instr, shift_instr) \
  do {                                          \
    __ LoadRR(r0, i.InputRegister(0));          \
    __ shift_instr(r0, Operand(32));            \
    __ div_instr(r0, i.InputRegister(1));       \
    __ LoadlW(i.OutputRegister(), r0);          \
  } while (0)
626
// Computes fmod(input 0, input 1) by calling the C runtime's
// mod-two-doubles helper; result lands in the output double register.
#define ASSEMBLE_FLOAT_MODULO()                                               \
  do {                                                                        \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
                            i.InputDoubleRegister(1));                        \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
                     0, 2);                                                   \
    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
  } while (0)
637
// Calls the C runtime's ieee754_<name> unary function on double register 0.
#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 1);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)
650
// Calls the C runtime's ieee754_<name> binary function on double regs 0, 1.
#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                            i.InputDoubleRegister(1));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 2);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)
664
// Computes max(left, right) with NaN handling (an unordered compare routes
// through the NaN checks) and correct signed-zero selection: when both
// operands are ±0, left + right yields the max because +0 + -0 == +0.
#define ASSEMBLE_DOUBLE_MAX()                                          \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cdbr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ bge(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cdbr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For max we want logical-and of sign bit: (L + R) */             \
    __ ldr(result_reg, left_reg);                                      \
    __ adbr(result_reg, right_reg);                                    \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cdbr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (!right_reg.is(result_reg)) {                                   \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (!left_reg.is(result_reg)) {                                    \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)
706
// Emits code computing the IEEE-754/JS minimum of two doubles:
// - If either input is NaN, the NaN operand is the result (left is
//   tested first via cdbr(left, left); otherwise right must be the NaN).
// - min(+0, -0) == min(-0, +0) == -0: the sign bits are logically OR-ed
//   by computing -(-L + -R); when left and right share a register, the
//   in-place negation of left already negated right too, hence the add
//   instead of the subtract on that path.
// NOTE: left_reg is negated in place on the signed-zero path and
// kDoubleRegZero is (re)zeroed; the result register may alias an input.
#define ASSEMBLE_DOUBLE_MIN()                                          \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cdbr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ ble(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cdbr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For min we want logical-or of sign bit: -(-L + -R) */           \
    __ lcdbr(left_reg, left_reg);                                      \
    __ ldr(result_reg, left_reg);                                      \
    if (left_reg.is(right_reg)) {                                      \
      __ adbr(result_reg, right_reg);                                  \
    } else {                                                           \
      __ sdbr(result_reg, right_reg);                                  \
    }                                                                  \
    __ lcdbr(result_reg, result_reg);                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cdbr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (!right_reg.is(result_reg)) {                                   \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (!left_reg.is(result_reg)) {                                    \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)
754
// Emits code computing the IEEE-754/JS maximum of two floats:
// - If either input is NaN, the NaN operand is the result (left is
//   tested first via cebr(left, left); otherwise right must be the NaN).
// - max(+0, -0) == +0: the sign bits are logically AND-ed by adding the
//   operands, relying on +0 + -0 == +0.
// NOTE: kDoubleRegZero is (re)zeroed; the result register may alias an
// input.
#define ASSEMBLE_FLOAT_MAX()                                           \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cebr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ bge(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cebr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For max we want logical-and of sign bit: (L + R) */             \
    __ ldr(result_reg, left_reg);                                      \
    __ aebr(result_reg, right_reg);                                    \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cebr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (!right_reg.is(result_reg)) {                                   \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (!left_reg.is(result_reg)) {                                    \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)
796
// Emits code computing the IEEE-754/JS minimum of two floats:
// - If either input is NaN, the NaN operand is the result (left is
//   tested first via cebr(left, left); otherwise right must be the NaN).
// - min(+0, -0) == min(-0, +0) == -0: the sign bits are logically OR-ed
//   by computing -(-L + -R); when left and right share a register, the
//   in-place negation of left already negated right too, hence the add
//   instead of the subtract on that path.
// NOTE: left_reg is negated in place on the signed-zero path and
// kDoubleRegZero is (re)zeroed; the result register may alias an input.
#define ASSEMBLE_FLOAT_MIN()                                           \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ cebr(left_reg, right_reg);                                      \
    __ bunordered(&check_nan_left, Label::kNear);                      \
    __ beq(&check_zero);                                               \
    __ ble(&return_left, Label::kNear);                                \
    __ b(&return_right, Label::kNear);                                 \
                                                                       \
    __ bind(&check_zero);                                              \
    __ lzdr(kDoubleRegZero);                                           \
    __ cebr(left_reg, kDoubleRegZero);                                 \
    /* left == right != 0. */                                          \
    __ bne(&return_left, Label::kNear);                                \
    /* At this point, both left and right are either 0 or -0. */       \
    /* N.B. The following works because +0 + -0 == +0 */               \
    /* For min we want logical-or of sign bit: -(-L + -R) */           \
    __ lcebr(left_reg, left_reg);                                      \
    __ ldr(result_reg, left_reg);                                      \
    if (left_reg.is(right_reg)) {                                      \
      __ aebr(result_reg, right_reg);                                  \
    } else {                                                           \
      __ sebr(result_reg, right_reg);                                  \
    }                                                                  \
    __ lcebr(result_reg, result_reg);                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ cebr(left_reg, left_reg);                                       \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left, Label::kNear);                         \
                                                                       \
    __ bind(&return_right);                                            \
    if (!right_reg.is(result_reg)) {                                   \
      __ ldr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done, Label::kNear);                                         \
                                                                       \
    __ bind(&return_left);                                             \
    if (!left_reg.is(result_reg)) {                                    \
      __ ldr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)
844//
845// Only MRI mode for these instructions available
// Emits a floating-point load: decodes the instruction's memory operand
// and loads it into the output double register via |asm_instr|.
#define ASSEMBLE_LOAD_FLOAT(asm_instr)                \
  do {                                                \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None;                 \
    MemOperand operand = i.MemoryOperand(&mode);      \
    __ asm_instr(result, operand);                    \
  } while (0)
853
// Emits an integer load: decodes the instruction's memory operand and
// loads it into the output general-purpose register via |asm_instr|.
#define ASSEMBLE_LOAD_INTEGER(asm_instr)         \
  do {                                           \
    Register result = i.OutputRegister();        \
    AddressingMode mode = kMode_None;            \
    MemOperand operand = i.MemoryOperand(&mode); \
    __ asm_instr(result, operand);               \
  } while (0)
861
// Emits a 64-bit "load and test": moves the first input into |dst| (r0
// when the instruction has no register output, i.e. only the condition
// code is wanted), choosing the RM form |asm_instr_rm| for memory and
// stack-slot inputs and the RR form |asm_instr_rr| for register inputs.
// Wrapped in do { ... } while (0) so the macro expands to a single
// statement and composes safely with unbraced if/else, consistent with
// the other ASSEMBLE_* macros in this file.
#define ASSEMBLE_LOADANDTEST64(asm_instr_rr, asm_instr_rm)              \
  do {                                                                  \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0;  \
    if (mode != kMode_None) {                                           \
      size_t first_index = 0;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      __ asm_instr_rm(dst, operand);                                    \
    } else if (HasRegisterInput(instr, 0)) {                            \
      __ asm_instr_rr(dst, i.InputRegister(0));                         \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 0));                              \
      __ asm_instr_rm(dst, i.InputStackSlot(0));                        \
    }                                                                   \
  } while (0)
877
// Emits a 32-bit "load and test": moves the first input into |dst| (r0
// when the instruction has no register output, i.e. only the condition
// code is wanted), choosing the RM form |asm_instr_rm| for memory and
// 32-bit stack-slot inputs and the RR form |asm_instr_rr| for register
// inputs. Wrapped in do { ... } while (0) so the macro expands to a
// single statement and composes safely with unbraced if/else, consistent
// with the other ASSEMBLE_* macros in this file.
#define ASSEMBLE_LOADANDTEST32(asm_instr_rr, asm_instr_rm)              \
  do {                                                                  \
    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0;  \
    if (mode != kMode_None) {                                           \
      size_t first_index = 0;                                           \
      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
      __ asm_instr_rm(dst, operand);                                    \
    } else if (HasRegisterInput(instr, 0)) {                            \
      __ asm_instr_rr(dst, i.InputRegister(0));                         \
    } else {                                                            \
      DCHECK(HasStackSlotInput(instr, 0));                              \
      __ asm_instr_rm(dst, i.InputStackSlot32(0));                      \
    }                                                                   \
  } while (0)
893
// Emits a 32-bit float store: the memory operand is decoded from the
// leading inputs; the value to store is the input that follows it.
#define ASSEMBLE_STORE_FLOAT32()                         \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreFloat32(value, operand);                     \
  } while (0)
902
// Emits a 64-bit double store: the memory operand is decoded from the
// leading inputs; the value to store is the input that follows it.
#define ASSEMBLE_STORE_DOUBLE()                          \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreDouble(value, operand);                      \
  } while (0)
911
// Emits an integer store via |asm_instr|: the memory operand is decoded
// from the leading inputs; the value to store is the input after it.
#define ASSEMBLE_STORE_INTEGER(asm_instr)                \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    __ asm_instr(value, operand);                        \
  } while (0)
920
// Bounds-checked floating-point load: the offset (the operand's base
// register) is compared as an unsigned 32-bit value against the length
// limit in input 2 (register or immediate). Out-of-range offsets branch
// to an out-of-line stub that produces a NaN of the given |width|
// instead of touching memory.
// (CleanUInt32 presumably zero-extends the validated 32-bit offset
// before it is used for addressing — confirm in the macro-assembler.)
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width)              \
  do {                                                             \
    DoubleRegister result = i.OutputDoubleRegister();              \
    size_t index = 0;                                              \
    AddressingMode mode = kMode_None;                              \
    MemOperand operand = i.MemoryOperand(&mode, index);            \
    Register offset = operand.rb();                                \
    if (HasRegisterInput(instr, 2)) {                              \
      __ CmpLogical32(offset, i.InputRegister(2));                 \
    } else {                                                       \
      __ CmpLogical32(offset, i.InputImmediate(2));                \
    }                                                              \
    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
    __ bge(ool->entry());                                          \
    __ CleanUInt32(offset);                                        \
    __ asm_instr(result, operand);                                 \
    __ bind(ool->exit());                                          \
  } while (0)
939
// Bounds-checked integer load: the offset (the operand's base register)
// is compared as an unsigned 32-bit value against the length limit in
// input 2 (register or immediate). Out-of-range offsets branch to an
// out-of-line stub that produces zero instead of touching memory.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
  do {                                                       \
    Register result = i.OutputRegister();                    \
    size_t index = 0;                                        \
    AddressingMode mode = kMode_None;                        \
    MemOperand operand = i.MemoryOperand(&mode, index);      \
    Register offset = operand.rb();                          \
    if (HasRegisterInput(instr, 2)) {                        \
      __ CmpLogical32(offset, i.InputRegister(2));           \
    } else {                                                 \
      __ CmpLogical32(offset, i.InputImmediate(2));          \
    }                                                        \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ bge(ool->entry());                                    \
    __ CleanUInt32(offset);                                  \
    __ asm_instr(result, operand);                           \
    __ bind(ool->exit());                                    \
  } while (0)
958
// Bounds-checked 32-bit float store: the offset (the operand's base
// register) is compared as an unsigned 32-bit value against the length
// limit in input 2; out-of-range stores are skipped entirely. The value
// to store is input 3.
#define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ CmpLogical32(offset, i.InputRegister(2));      \
    } else {                                            \
      __ CmpLogical32(offset, i.InputImmediate(2));     \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    __ CleanUInt32(offset);                             \
    __ StoreFloat32(value, operand);                    \
    __ bind(&done);                                     \
  } while (0)
977
// Bounds-checked 64-bit double store: the offset (the operand's base
// register) is compared as an unsigned 32-bit value against the length
// limit in input 2; out-of-range stores are skipped entirely. The value
// to store is input 3. Only the MRR addressing mode is supported here.
#define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    DCHECK_EQ(kMode_MRR, mode);                         \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ CmpLogical32(offset, i.InputRegister(2));      \
    } else {                                            \
      __ CmpLogical32(offset, i.InputImmediate(2));     \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    __ CleanUInt32(offset);                             \
    __ StoreDouble(value, operand);                     \
    __ bind(&done);                                     \
  } while (0)
997
// Bounds-checked integer store via |asm_instr|: the offset (the
// operand's base register) is compared as an unsigned 32-bit value
// against the length limit in input 2; out-of-range stores are skipped
// entirely. The value to store is input 3.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)       \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ CmpLogical32(offset, i.InputRegister(2));      \
    } else {                                            \
      __ CmpLogical32(offset, i.InputImmediate(2));     \
    }                                                   \
    __ bge(&done);                                      \
    Register value = i.InputRegister(3);                \
    __ CleanUInt32(offset);                             \
    __ asm_instr(value, operand);                       \
    __ bind(&done);                                     \
  } while (0)
1016
// Tears down the current stack frame (emitted before returns/tail calls).
void CodeGenerator::AssembleDeconstructFrame() {
  __ LeaveFrame(StackFrame::MANUAL);
}
1020
// Prepares for a tail call: restores the frame state saved for the tail
// call if a frame was built, then switches frame accesses to be
// SP-relative from here on.
void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ RestoreFrameStateForTailCall();
  }
  frame_access_state()->SetFrameAccessToSP();
}
1027
// If the current frame is an arguments adaptor frame, emits code to drop
// it (and the caller's arguments) before a tail call, via
// PrepareForTailCall. |args_reg| holds the callee argument count; the
// three scratch registers must not alias it or each other.
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CmpP(scratch1,
          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
  __ bne(&done);

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ LoadP(caller_args_count_reg,
           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}
1053
1054namespace {
1055
1056void FlushPendingPushRegisters(MacroAssembler* masm,
1057                               FrameAccessState* frame_access_state,
1058                               ZoneVector<Register>* pending_pushes) {
1059  switch (pending_pushes->size()) {
1060    case 0:
1061      break;
1062    case 1:
1063      masm->Push((*pending_pushes)[0]);
1064      break;
1065    case 2:
1066      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
1067      break;
1068    case 3:
1069      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
1070                 (*pending_pushes)[2]);
1071      break;
1072    default:
1073      UNREACHABLE();
1074      break;
1075  }
1076  frame_access_state->IncreaseSPDelta(pending_pushes->size());
1077  pending_pushes->resize(0);
1078}
1079
1080void AddPendingPushRegister(MacroAssembler* masm,
1081                            FrameAccessState* frame_access_state,
1082                            ZoneVector<Register>* pending_pushes,
1083                            Register reg) {
1084  pending_pushes->push_back(reg);
1085  if (pending_pushes->size() == 3 || reg.is(ip)) {
1086    FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
1087  }
1088}
1089void AdjustStackPointerForTailCall(
1090    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
1091    ZoneVector<Register>* pending_pushes = nullptr,
1092    bool allow_shrinkage = true) {
1093  int current_sp_offset = state->GetSPToFPSlotCount() +
1094                          StandardFrameConstants::kFixedSlotCountAboveFp;
1095  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
1096  if (stack_slot_delta > 0) {
1097    if (pending_pushes != nullptr) {
1098      FlushPendingPushRegisters(masm, state, pending_pushes);
1099    }
1100    masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
1101    state->IncreaseSPDelta(stack_slot_delta);
1102  } else if (allow_shrinkage && stack_slot_delta < 0) {
1103    if (pending_pushes != nullptr) {
1104      FlushPendingPushRegisters(masm, state, pending_pushes);
1105    }
1106    masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
1107    state->IncreaseSPDelta(stack_slot_delta);
1108  }
1109}
1110
1111}  // namespace
1112
// Runs before the gap moves of a tail call. When the trailing
// stack-slot moves end exactly at |first_unused_stack_slot|, they are
// turned into machine pushes (batched via the pending-push queue) while
// sp is adjusted in lockstep; remaining adjustment is done at the end
// with allow_shrinkage == false (shrinking is left to
// AssembleTailCallAfterGap).
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
  ZoneVector<MoveOperands*> pushes(zone());
  GetPushCompatibleMoves(instr, flags, &pushes);

  // Only convert moves to pushes when they end at the first unused slot.
  if (!pushes.empty() &&
      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
       first_unused_stack_slot)) {
    S390OperandConverter g(this, instr);
    ZoneVector<Register> pending_pushes(zone());
    for (auto move : pushes) {
      LocationOperand destination_location(
          LocationOperand::cast(move->destination()));
      InstructionOperand source(move->source());
      // Bring sp to just below this destination slot, accounting for the
      // registers already queued but not yet pushed.
      AdjustStackPointerForTailCall(
          masm(), frame_access_state(),
          destination_location.index() - pending_pushes.size(),
          &pending_pushes);
      if (source.IsStackSlot()) {
        // Materialize through ip; queueing ip forces an immediate flush
        // (see AddPendingPushRegister) before ip can be clobbered again.
        LocationOperand source_location(LocationOperand::cast(source));
        __ LoadP(ip, g.SlotToMemOperand(source_location.index()));
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               ip);
      } else if (source.IsRegister()) {
        LocationOperand source_location(LocationOperand::cast(source));
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               source_location.GetRegister());
      } else if (source.IsImmediate()) {
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               ip);
      } else {
        // Pushes of non-scalar data types is not supported.
        UNIMPLEMENTED();
      }
      // The move has been performed as a push; remove it from the gap.
      move->Eliminate();
    }
    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
  }
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot, nullptr, false);
}
1155
// Runs after the gap moves of a tail call: brings sp to the callee's
// expected |first_unused_stack_slot|, growing or shrinking as needed
// (shrinkage is allowed here, unlike in AssembleTailCallBeforeGap).
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot);
}
1161
1162// Assembles an instruction after register allocation, producing machine code.
1163CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
1164    Instruction* instr) {
1165  S390OperandConverter i(this, instr);
1166  ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
1167
1168  switch (opcode) {
1169    case kArchComment: {
1170      Address comment_string = i.InputExternalReference(0).address();
1171      __ RecordComment(reinterpret_cast<const char*>(comment_string));
1172      break;
1173    }
1174    case kArchCallCodeObject: {
1175      EnsureSpaceForLazyDeopt();
1176      if (HasRegisterInput(instr, 0)) {
1177        __ AddP(ip, i.InputRegister(0),
1178                Operand(Code::kHeaderSize - kHeapObjectTag));
1179        __ Call(ip);
1180      } else {
1181        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
1182                RelocInfo::CODE_TARGET);
1183      }
1184      RecordCallPosition(instr);
1185      frame_access_state()->ClearSPDelta();
1186      break;
1187    }
1188    case kArchTailCallCodeObjectFromJSFunction:
1189    case kArchTailCallCodeObject: {
1190      if (opcode == kArchTailCallCodeObjectFromJSFunction) {
1191        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
1192                                         i.TempRegister(0), i.TempRegister(1),
1193                                         i.TempRegister(2));
1194      }
1195      if (HasRegisterInput(instr, 0)) {
1196        __ AddP(ip, i.InputRegister(0),
1197                Operand(Code::kHeaderSize - kHeapObjectTag));
1198        __ Jump(ip);
1199      } else {
1200        // We cannot use the constant pool to load the target since
1201        // we've already restored the caller's frame.
1202        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
1203        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
1204                RelocInfo::CODE_TARGET);
1205      }
1206      frame_access_state()->ClearSPDelta();
1207      frame_access_state()->SetFrameAccessToDefault();
1208      break;
1209    }
1210    case kArchTailCallAddress: {
1211      CHECK(!instr->InputAt(0)->IsImmediate());
1212      __ Jump(i.InputRegister(0));
1213      frame_access_state()->ClearSPDelta();
1214      frame_access_state()->SetFrameAccessToDefault();
1215      break;
1216    }
1217    case kArchCallJSFunction: {
1218      EnsureSpaceForLazyDeopt();
1219      Register func = i.InputRegister(0);
1220      if (FLAG_debug_code) {
1221        // Check the function's context matches the context argument.
1222        __ LoadP(kScratchReg,
1223                 FieldMemOperand(func, JSFunction::kContextOffset));
1224        __ CmpP(cp, kScratchReg);
1225        __ Assert(eq, kWrongFunctionContext);
1226      }
1227      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
1228      __ Call(ip);
1229      RecordCallPosition(instr);
1230      frame_access_state()->ClearSPDelta();
1231      break;
1232    }
1233    case kArchTailCallJSFunctionFromJSFunction: {
1234      Register func = i.InputRegister(0);
1235      if (FLAG_debug_code) {
1236        // Check the function's context matches the context argument.
1237        __ LoadP(kScratchReg,
1238                 FieldMemOperand(func, JSFunction::kContextOffset));
1239        __ CmpP(cp, kScratchReg);
1240        __ Assert(eq, kWrongFunctionContext);
1241      }
1242      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
1243                                       i.TempRegister(0), i.TempRegister(1),
1244                                       i.TempRegister(2));
1245      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
1246      __ Jump(ip);
1247      frame_access_state()->ClearSPDelta();
1248      frame_access_state()->SetFrameAccessToDefault();
1249      break;
1250    }
    case kArchPrepareCallCFunction: {
      // Set up the stack (using kScratchReg as scratch) for a C call whose
      // parameter count is encoded in the instruction's MiscField.
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      // The callee is either an immediate ExternalReference or a register.
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      // After the call, revert to default frame addressing and drop the SP
      // delta that was accumulated for the outgoing C arguments.
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchDebugBreak:
      __ stop("kArchDebugBreak");
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      // Record the deoptimization translation for this instruction, then emit
      // the call into the deoptimizer.
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      CodeGenResult result =
          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
      if (result != kSuccess) return result;
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      break;
    case kArchStackPointer:
      __ LoadRR(i.OutputRegister(), sp);
      break;
    case kArchFramePointer:
      __ LoadRR(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      // With a frame, the caller's FP is the saved FP at offset 0; without a
      // frame, FP itself already belongs to the parent.
      if (frame_access_state()->has_frame()) {
        __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ LoadRR(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      // TODO(mbrandy): move slow call to stub out of line.
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArchStoreWithWriteBarrier: {
      // Store `value` into `object` (MRI: register + immediate offset;
      // MRR: register + register offset) and emit an out-of-line record-write
      // stub. The write barrier is only taken when the page-flag check below
      // branches to the OOL entry.
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      OutOfLineRecordWrite* ool;

      AddressingMode addressing_mode =
          AddressingModeField::decode(instr->opcode());
      if (addressing_mode == kMode_MRI) {
        int32_t offset = i.InputInt32(1);
        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
                                                scratch0, scratch1, mode);
        __ StoreP(value, MemOperand(object, offset));
      } else {
        DCHECK_EQ(kMode_MRR, addressing_mode);
        Register offset(i.InputRegister(1));
        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
                                                scratch0, scratch1, mode);
        __ StoreP(value, MemOperand(object, offset));
      }
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      // Materialize the address of a frame slot: base (SP or FP, as decided
      // by the frame access state) plus the slot's byte offset.
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      __ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
              Operand(offset.offset()));
      break;
    }
    case kS390_And32:
      // 32-bit logical ops: use the three-operand (distinct destination)
      // instruction form when the DISTINCT_OPS facility is available,
      // otherwise the two-operand form.
      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
        ASSEMBLE_BIN_OP(nrk, And, nilf);
      } else {
        ASSEMBLE_BIN_OP(nr, And, nilf);
      }
      break;
    case kS390_And64:
      ASSEMBLE_BINOP(AndP);
      break;
    case kS390_Or32:
      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
        ASSEMBLE_BIN_OP(ork, Or, oilf);
      } else {
        ASSEMBLE_BIN_OP(or_z, Or, oilf);
      }
      break;
    case kS390_Or64:
      ASSEMBLE_BINOP(OrP);
      break;
    case kS390_Xor32:
      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
        ASSEMBLE_BIN_OP(xrk, Xor, xilf);
      } else {
        ASSEMBLE_BIN_OP(xr, Xor, xilf);
      }
      break;
    case kS390_Xor64:
      ASSEMBLE_BINOP(XorP);
      break;
    case kS390_ShiftLeft32:
      // 32-bit shifts: same distinct-ops selection as the logical ops above.
      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
        AssembleBinOp(i, masm(), instr, &MacroAssembler::ShiftLeft,
                      &MacroAssembler::ShiftLeft);
      } else {
        AssembleBinOp(i, masm(), instr, &MacroAssembler::sll,
                      &MacroAssembler::sll);
      }
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ShiftLeft64:
      ASSEMBLE_BINOP(sllg);
      break;
#endif
    case kS390_ShiftRight32:
      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
        AssembleBinOp(i, masm(), instr, &MacroAssembler::srlk,
                      &MacroAssembler::srlk);
      } else {
        AssembleBinOp(i, masm(), instr, &MacroAssembler::srl,
                      &MacroAssembler::srl);
      }
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ShiftRight64:
      ASSEMBLE_BINOP(srlg);
      break;
#endif
    case kS390_ShiftRightArith32:
      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
        AssembleBinOp(i, masm(), instr, &MacroAssembler::srak,
                      &MacroAssembler::srak);
      } else {
        AssembleBinOp(i, masm(), instr, &MacroAssembler::sra,
                      &MacroAssembler::sra);
      }
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ShiftRightArith64:
      ASSEMBLE_BINOP(srag);
      break;
#endif
#if !V8_TARGET_ARCH_S390X
    // 64-bit arithmetic on 32-bit targets: operands and results are passed
    // as (low word, high word) register pairs.
    case kS390_AddPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      // Add low words first, then add high words with the carry produced by
      // the low-word addition.
      __ AddLogical32(i.OutputRegister(0), i.InputRegister(0),
                      i.InputRegister(2));
      __ AddLogicalWithCarry32(i.OutputRegister(1), i.InputRegister(1),
                               i.InputRegister(3));
      break;
    case kS390_SubPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      // Subtract low words first, then subtract high words with the borrow
      // produced by the low-word subtraction.
      __ SubLogical32(i.OutputRegister(0), i.InputRegister(0),
                      i.InputRegister(2));
      __ SubLogicalWithBorrow32(i.OutputRegister(1), i.InputRegister(1),
                                i.InputRegister(3));
      break;
    case kS390_MulPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      // Build each full 64-bit operand in r0/r1: shift the high word into
      // bits 63..32, then use lr (a 32-bit load that leaves the upper half
      // untouched) to fill bits 31..0 with the low word.
      __ sllg(r0, i.InputRegister(1), Operand(32));
      __ sllg(r1, i.InputRegister(3), Operand(32));
      __ lr(r0, i.InputRegister(0));
      __ lr(r1, i.InputRegister(2));
      // 64-bit multiply, then split the product back into low/high words.
      __ msgr(r1, r0);
      __ lr(i.OutputRegister(0), r1);
      __ srag(i.OutputRegister(1), r1, Operand(32));
      break;
    case kS390_ShiftLeftPair: {
      // When the high word of the result is unused, only one output is
      // allocated; a temp register absorbs the discarded word.
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                         i.InputRegister(1), i.InputInt32(2));
      } else {
        __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                         i.InputRegister(1), kScratchReg, i.InputRegister(2));
      }
      break;
    }
    case kS390_ShiftRightPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftRightPair(i.OutputRegister(0), second_output,
                          i.InputRegister(0), i.InputRegister(1),
                          i.InputInt32(2));
      } else {
        __ ShiftRightPair(i.OutputRegister(0), second_output,
                          i.InputRegister(0), i.InputRegister(1), kScratchReg,
                          i.InputRegister(2));
      }
      break;
    }
    case kS390_ShiftRightArithPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftRightArithPair(i.OutputRegister(0), second_output,
                               i.InputRegister(0), i.InputRegister(1),
                               i.InputInt32(2));
      } else {
        __ ShiftRightArithPair(i.OutputRegister(0), second_output,
                               i.InputRegister(0), i.InputRegister(1),
                               kScratchReg, i.InputRegister(2));
      }
      break;
    }
#endif
    case kS390_RotRight32: {
      // rll rotates left; rotate-right by n is emitted as rotate-left by
      // (32 - n). For a register amount the negation is produced with
      // LoadComplementRR into the scratch register.
      if (HasRegisterInput(instr, 1)) {
        __ LoadComplementRR(kScratchReg, i.InputRegister(1));
        __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
      } else {
        __ rll(i.OutputRegister(), i.InputRegister(0),
               Operand(32 - i.InputInt32(1)));
      }
      // NOTE(review): CHECK_AND_ZERO_EXT_OUTPUT is a macro defined earlier in
      // this file; presumably it zero-extends the 32-bit result when the
      // instruction requires it — confirm against the macro definition.
      CHECK_AND_ZERO_EXT_OUTPUT(2);
      break;
    }
#if V8_TARGET_ARCH_S390X
    case kS390_RotRight64:
      // Same rotate-left trick as above, with a 64-bit rotate.
      if (HasRegisterInput(instr, 1)) {
        __ LoadComplementRR(kScratchReg, i.InputRegister(1));
        __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
      } else {
        __ rllg(i.OutputRegister(), i.InputRegister(0),
                Operand(64 - i.InputInt32(1)));
      }
      break;
    case kS390_RotLeftAndClear64:
      // Rotate left then mask: a single risbg when the general-instruction
      // extension facility is present, otherwise a rotate plus a
      // shift-left/shift-right/shift-left sequence that clears the same bits.
      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        int shiftAmount = i.InputInt32(1);
        int endBit = 63 - shiftAmount;
        int startBit = 63 - i.InputInt32(2);
        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
                 Operand(endBit), Operand(shiftAmount), true);
      } else {
        int shiftAmount = i.InputInt32(1);
        int clearBit = 63 - i.InputInt32(2);
        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
        __ srlg(i.OutputRegister(), i.OutputRegister(),
                Operand(clearBit + shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(shiftAmount));
      }
      break;
    case kS390_RotLeftAndClearLeft64:
      // Rotate left and clear bits to the left of the selected range.
      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        int shiftAmount = i.InputInt32(1);
        int endBit = 63;
        int startBit = 63 - i.InputInt32(2);
        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
                 Operand(endBit), Operand(shiftAmount), true);
      } else {
        int shiftAmount = i.InputInt32(1);
        int clearBit = 63 - i.InputInt32(2);
        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
      }
      break;
    case kS390_RotLeftAndClearRight64:
      // Rotate left and clear bits to the right of the selected range.
      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
        int shiftAmount = i.InputInt32(1);
        int endBit = 63 - i.InputInt32(2);
        int startBit = 0;
        __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
                 Operand(endBit), Operand(shiftAmount), true);
      } else {
        int shiftAmount = i.InputInt32(1);
        int clearBit = i.InputInt32(2);
        __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
        __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
      }
      break;
#endif
    case kS390_Add32: {
      // 32-bit add: three-operand form with DISTINCT_OPS, otherwise the
      // two-operand form (immediate variants via the third macro argument).
      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
        ASSEMBLE_BIN_OP(ark, Add32, Add32_RRI);
      } else {
        ASSEMBLE_BIN_OP(ar, Add32, Add32_RI);
      }
      break;
    }
    case kS390_Add64:
      ASSEMBLE_BINOP(AddP);
      break;
    case kS390_AddFloat:
      // Ensure we don't clobber right/InputReg(1)
      // The binary FP instructions are destructive (dst op= src); when the
      // output aliases the right operand the ASSEMBLE_FLOAT_UNOP form
      // (defined earlier in this file) is used to handle the overlap.
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(aebr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ aebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_AddDouble:
      // Ensure we don't clobber right/InputReg(1)
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(adbr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_Sub32:
      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
        ASSEMBLE_BIN_OP(srk, Sub32, Sub32_RRI);
      } else {
        ASSEMBLE_BIN_OP(sr, Sub32, Sub32_RI);
      }
      break;
    case kS390_Sub64:
      ASSEMBLE_BINOP(SubP);
      break;
    case kS390_SubFloat:
      // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
      // Subtraction is not commutative, so if the output aliases the right
      // operand it is saved in the scratch FP register first.
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ sebr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        }
        __ sebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_SubDouble:
      // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ sdbr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        }
        __ sdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_Mul32:
      ASSEMBLE_BIN_OP(Mul32, Mul32, Mul32);
      break;
    case kS390_Mul32WithOverflow:
      ASSEMBLE_BIN_OP(Mul32WithOverflowIfCCUnequal,
                      Mul32WithOverflowIfCCUnequal,
                      Mul32WithOverflowIfCCUnequal);
      break;
    case kS390_Mul64:
      // Mul64 is destructive — the product is produced in its first operand —
      // so register allocation must have placed the output in input 0.
      CHECK(i.OutputRegister().is(i.InputRegister(0)));
      if (HasRegisterInput(instr, 1)) {
        __ Mul64(i.InputRegister(0), i.InputRegister(1));
      } else if (HasImmediateInput(instr, 1)) {
        __ Mul64(i.InputRegister(0), i.InputImmediate(1));
      } else if (HasStackSlotInput(instr, 1)) {
        __ Mul64(i.InputRegister(0), i.InputStackSlot(1));
      } else {
        UNIMPLEMENTED();
      }
      break;
    case kS390_MulHigh32:
      ASSEMBLE_BIN_OP(MulHigh32, MulHigh32, MulHigh32);
      break;
    case kS390_MulHighU32:
      ASSEMBLE_BIN_OP(MulHighU32, MulHighU32, MulHighU32);
      break;
    case kS390_MulFloat:
      // Ensure we don't clobber right
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(meebr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ meebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_MulDouble:
      // Ensure we don't clobber right
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        ASSEMBLE_FLOAT_UNOP(mdbr);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ mdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Div64:
      // Signed 64-bit divide: dsgr operates on the even/odd pair r0:r1,
      // leaving the quotient in r1 and the remainder in r0 (the Mod64 case
      // below reads r0 instead).
      __ LoadRR(r1, i.InputRegister(0));
      __ dsgr(r0, i.InputRegister(1));  // R1: Dividend
      __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
      break;
#endif
    case kS390_Div32: {
      ASSEMBLE_BIN_OP(Div32, Div32, Div32);
      break;
    }
#if V8_TARGET_ARCH_S390X
    case kS390_DivU64:
      // Unsigned 64-bit divide: dlgr takes a 128-bit dividend in r0:r1, so
      // the high half (r0) is zeroed first.
      __ LoadRR(r1, i.InputRegister(0));
      __ LoadImmP(r0, Operand::Zero());
      __ dlgr(r0, i.InputRegister(1));  // R0:R1: Dividend
      __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
      break;
#endif
    case kS390_DivU32: {
      ASSEMBLE_BIN_OP(DivU32, DivU32, DivU32);
      break;
    }
    case kS390_DivFloat:
      // OutputDoubleRegister() = InputDoubleRegister(0) / InputDoubleRegister(1)
      // Division is not commutative, so if the output aliases the divisor it
      // is saved in the scratch FP register first.
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ debr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ debr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_DivDouble:
      // OutputDoubleRegister() = InputDoubleRegister(0) / InputDoubleRegister(1)
      if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
        __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
        __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ ddbr(i.OutputDoubleRegister(), kScratchDoubleReg);
      } else {
        if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
          __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
        __ ddbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
      }
      break;
    case kS390_Mod32:
      ASSEMBLE_BIN_OP(Mod32, Mod32, Mod32);
      break;
    case kS390_ModU32:
      ASSEMBLE_BIN_OP(ModU32, ModU32, ModU32);
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Mod64:
      // Same divide sequence as Div64, but the remainder (r0) is the result.
      __ LoadRR(r1, i.InputRegister(0));
      __ dsgr(r0, i.InputRegister(1));  // R1: Dividend
      __ ltgr(i.OutputRegister(), r0);  // Copy R0: Remainder to output
      break;
    case kS390_ModU64:
      __ LoadRR(r1, i.InputRegister(0));
      __ LoadImmP(r0, Operand::Zero());
      __ dlgr(r0, i.InputRegister(1));  // R0:R1: Dividend
      __ ltgr(i.OutputRegister(), r0);  // Copy R0: Remainder to output
      break;
#endif
    // Single-precision unary operations and float rounding modes.
    case kS390_AbsFloat:
      __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_SqrtFloat:
      ASSEMBLE_FLOAT_UNOP(sqebr);
      break;
    case kS390_FloorFloat:
      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
      break;
    case kS390_CeilFloat:
      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
      break;
    case kS390_TruncateFloat:
      __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
      break;
    //  Double operations
    case kS390_ModDouble:
      ASSEMBLE_FLOAT_MODULO();
      break;
    // Transcendental math functions are dispatched to the shared ieee754
    // runtime implementations via the ASSEMBLE_IEEE754_* macros.
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow: {
      // NOTE(review): MathPowStub appears to leave its result in d3; it is
      // moved to d1 here — confirm against the stub's register convention.
      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
      __ CallStub(&stub);
      __ Move(d1, d3);
      break;
    }
    case kS390_Neg32:
      // 32-bit two's-complement negate.
      __ lcr(i.OutputRegister(), i.InputRegister(0));
      CHECK_AND_ZERO_EXT_OUTPUT(1);
      break;
    case kS390_Neg64:
      __ lcgr(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_MaxFloat:
      ASSEMBLE_FLOAT_MAX();
      break;
    case kS390_MaxDouble:
      ASSEMBLE_DOUBLE_MAX();
      break;
    case kS390_MinFloat:
      ASSEMBLE_FLOAT_MIN();
      break;
    case kS390_MinDouble:
      ASSEMBLE_DOUBLE_MIN();
      break;
    case kS390_AbsDouble:
      __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kS390_SqrtDouble:
      ASSEMBLE_FLOAT_UNOP(sqdbr);
      break;
    // Double rounding modes, all via fidbra with an explicit rounding mask.
    case kS390_FloorDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
      break;
    case kS390_CeilDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
      break;
    case kS390_TruncateDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
      break;
    case kS390_RoundDouble:
      __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
      break;
    case kS390_NegFloat:
      ASSEMBLE_FLOAT_UNOP(lcebr);
      break;
    case kS390_NegDouble:
      ASSEMBLE_FLOAT_UNOP(lcdbr);
      break;
    case kS390_Cntlz32: {
      // Zero-extend to 64 bits, use the 64-bit find-leftmost-one (flogr,
      // result in r0), then subtract the 32 leading zeros introduced by the
      // extension.
      __ llgfr(i.OutputRegister(), i.InputRegister(0));
      __ flogr(r0, i.OutputRegister());
      __ Add32(i.OutputRegister(), r0, Operand(-32));
      // No need to zero-ext b/c llgfr is done already
      break;
    }
#if V8_TARGET_ARCH_S390X
    case kS390_Cntlz64: {
      __ flogr(r0, i.InputRegister(0));
      __ LoadRR(i.OutputRegister(), r0);
      break;
    }
#endif
    case kS390_Popcnt32:
      __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Popcnt64:
      __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
      break;
#endif
    case kS390_Cmp32:
      // Signed vs. unsigned compare selection is made inside the macro via
      // CompareLogical() on the instruction's flags condition.
      ASSEMBLE_COMPARE32(Cmp32, CmpLogical32);
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_Cmp64:
      ASSEMBLE_COMPARE(CmpP, CmpLogicalP);
      break;
#endif
    case kS390_CmpFloat:
      ASSEMBLE_FLOAT_COMPARE(cebr, ceb, ley);
      // __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kS390_CmpDouble:
      ASSEMBLE_FLOAT_COMPARE(cdbr, cdb, ldy);
      // __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      break;
    case kS390_Tst32:
      // Set the condition code from (input0 & mask); the result value itself
      // is discarded (computed into r0 or tested in place).
      if (HasRegisterInput(instr, 1)) {
        __ And(r0, i.InputRegister(0), i.InputRegister(1));
      } else {
        Operand opnd = i.InputImmediate(1);
        // Small masks fit tmll (test-under-mask on the low 16 bits) and
        // avoid clobbering a scratch register.
        if (is_uint16(opnd.immediate())) {
          __ tmll(i.InputRegister(0), opnd);
        } else {
          __ lr(r0, i.InputRegister(0));
          __ nilf(r0, opnd);
        }
      }
      break;
    case kS390_Tst64:
      if (HasRegisterInput(instr, 1)) {
        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
      } else {
        Operand opnd = i.InputImmediate(1);
        if (is_uint16(opnd.immediate())) {
          __ tmll(i.InputRegister(0), opnd);
        } else {
          __ AndP(r0, i.InputRegister(0), opnd);
        }
      }
      break;
    case kS390_Float64SilenceNaN: {
      DoubleRegister value = i.InputDoubleRegister(0);
      DoubleRegister result = i.OutputDoubleRegister();
      __ CanonicalizeNaN(result, value);
      break;
    }
    case kS390_Push:
      // Push one value; the frame access state tracks the SP adjustment in
      // pointer-sized slots.
      if (instr->InputAt(0)->IsFPRegister()) {
        __ lay(sp, MemOperand(sp, -kDoubleSize));
        __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else {
        __ Push(i.InputRegister(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kS390_PushFrame: {
      // Reserve |num_slots| stack slots and store the first value at the new
      // stack top.
      int num_slots = i.InputInt32(1);
      __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
      if (instr->InputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
        } else {
          DCHECK(op->representation() == MachineRepresentation::kFloat32);
          __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
        }
      } else {
        __ StoreP(i.InputRegister(0),
                  MemOperand(sp));
      }
      break;
    }
    case kS390_StoreToStackSlot: {
      // Store a value to an already-reserved SP-relative slot.
      int slot = i.InputInt32(1);
      if (instr->InputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ StoreDouble(i.InputDoubleRegister(0),
                         MemOperand(sp, slot * kPointerSize));
        } else {
          DCHECK(op->representation() == MachineRepresentation::kFloat32);
          __ StoreFloat32(i.InputDoubleRegister(0),
                          MemOperand(sp, slot * kPointerSize));
        }
      } else {
        __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
      }
      break;
    }
    case kS390_ExtendSignWord8:
      // Sign-extend byte to 32 bits.
      __ lbr(i.OutputRegister(), i.InputRegister(0));
      CHECK_AND_ZERO_EXT_OUTPUT(1);
      break;
    case kS390_ExtendSignWord16:
      // Sign-extend halfword to 32 bits.
      __ lhr(i.OutputRegister(), i.InputRegister(0));
      CHECK_AND_ZERO_EXT_OUTPUT(1);
      break;
#if V8_TARGET_ARCH_S390X
    case kS390_ExtendSignWord32:
      __ lgfr(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_Uint32ToUint64:
      // Zero extend
      __ llgfr(i.OutputRegister(), i.InputRegister(0));
      break;
    case kS390_Int64ToInt32:
      // sign extend
      __ lgfr(i.OutputRegister(), i.InputRegister(0));
      break;
    // Integer-to-floating-point conversions, signed and unsigned.
    case kS390_Int64ToFloat32:
      __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Int64ToDouble:
      __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Uint64ToFloat32:
      __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
                                     i.OutputDoubleRegister());
      break;
    case kS390_Uint64ToDouble:
      __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
                                      i.OutputDoubleRegister());
      break;
#endif
    case kS390_Int32ToFloat32:
      __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Int32ToDouble:
      __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kS390_Uint32ToFloat32:
      __ ConvertUnsignedIntToFloat(i.InputRegister(0),
                                   i.OutputDoubleRegister());
      break;
    case kS390_Uint32ToDouble:
      __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                    i.OutputDoubleRegister());
      break;
    case kS390_DoubleToInt32:
    case kS390_DoubleToUint32:
    case kS390_DoubleToInt64: {
      // Float-to-integer conversion. When a second output is allocated it is
      // a conversion-success flag: it is preset to 0 and only set to 1 when
      // the branch on Condition(1) (the special/out-of-range case) is not
      // taken — NOTE(review): confirm the CC-1 meaning against the
      // ConvertDoubleToInt64 implementation.
#if V8_TARGET_ARCH_S390X
      bool check_conversion =
          (opcode == kS390_DoubleToInt64 && i.OutputCount() > 1);
#endif
      __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_S390X
                              kScratchReg,
#endif
                              i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_S390X
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
#endif
      break;
    }
    case kS390_Float32ToInt32: {
      // Same success-flag pattern as the Double cases above.
      bool check_conversion = (i.OutputCount() > 1);
      __ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
                               kScratchDoubleReg, kRoundToZero);
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
      break;
    }
    case kS390_Float32ToUint32: {
      bool check_conversion = (i.OutputCount() > 1);
      __ ConvertFloat32ToUnsignedInt32(i.InputDoubleRegister(0),
                                       i.OutputRegister(0), kScratchDoubleReg);
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
      break;
    }
#if V8_TARGET_ARCH_S390X
    case kS390_Float32ToUint64: {
      bool check_conversion = (i.OutputCount() > 1);
      __ ConvertFloat32ToUnsignedInt64(i.InputDoubleRegister(0),
                                       i.OutputRegister(0), kScratchDoubleReg);
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
      break;
    }
#endif
    case kS390_Float32ToInt64: {
#if V8_TARGET_ARCH_S390X
      bool check_conversion =
          (opcode == kS390_Float32ToInt64 && i.OutputCount() > 1);
#endif
      __ ConvertFloat32ToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_S390X
                               kScratchReg,
#endif
                               i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_S390X
      if (check_conversion) {
        Label conversion_done;
        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
        __ b(Condition(1), &conversion_done);  // special case
        __ LoadImmP(i.OutputRegister(1), Operand(1));
        __ bind(&conversion_done);
      }
#endif
      break;
    }
2131#if V8_TARGET_ARCH_S390X
2132    case kS390_DoubleToUint64: {
2133      bool check_conversion = (i.OutputCount() > 1);
2134      __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
2135                                      i.OutputRegister(0), kScratchDoubleReg);
2136      if (check_conversion) {
2137        Label conversion_done;
2138        __ LoadImmP(i.OutputRegister(1), Operand::Zero());
2139        __ b(Condition(1), &conversion_done);  // special case
2140        __ LoadImmP(i.OutputRegister(1), Operand(1));
2141        __ bind(&conversion_done);
2142      }
2143      break;
2144    }
2145#endif
2146    case kS390_DoubleToFloat32:
2147      __ ledbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2148      break;
2149    case kS390_Float32ToDouble:
2150      __ ldebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
2151      break;
2152    case kS390_DoubleExtractLowWord32:
2153      __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2154      __ llgfr(i.OutputRegister(), i.OutputRegister());
2155      break;
2156    case kS390_DoubleExtractHighWord32:
2157      __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
2158      __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(32));
2159      break;
2160    case kS390_DoubleInsertLowWord32:
2161      __ lgdr(kScratchReg, i.OutputDoubleRegister());
2162      __ lr(kScratchReg, i.InputRegister(1));
2163      __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2164      break;
2165    case kS390_DoubleInsertHighWord32:
2166      __ sllg(kScratchReg, i.InputRegister(1), Operand(32));
2167      __ lgdr(r0, i.OutputDoubleRegister());
2168      __ lr(kScratchReg, r0);
2169      __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2170      break;
2171    case kS390_DoubleConstruct:
2172      __ sllg(kScratchReg, i.InputRegister(0), Operand(32));
2173      __ lr(kScratchReg, i.InputRegister(1));
2174
2175      // Bitwise convert from GPR to FPR
2176      __ ldgr(i.OutputDoubleRegister(), kScratchReg);
2177      break;
2178    case kS390_LoadWordS8:
2179      ASSEMBLE_LOAD_INTEGER(LoadlB);
2180#if V8_TARGET_ARCH_S390X
2181      __ lgbr(i.OutputRegister(), i.OutputRegister());
2182#else
2183      __ lbr(i.OutputRegister(), i.OutputRegister());
2184#endif
2185      break;
2186    case kS390_BitcastFloat32ToInt32:
2187      __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
2188      break;
2189    case kS390_BitcastInt32ToFloat32:
2190      __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
2191      break;
2192#if V8_TARGET_ARCH_S390X
2193    case kS390_BitcastDoubleToInt64:
2194      __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
2195      break;
2196    case kS390_BitcastInt64ToDouble:
2197      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
2198      break;
2199#endif
2200    case kS390_LoadWordU8:
2201      ASSEMBLE_LOAD_INTEGER(LoadlB);
2202      break;
2203    case kS390_LoadWordU16:
2204      ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
2205      break;
2206    case kS390_LoadWordS16:
2207      ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
2208      break;
2209    case kS390_LoadWordU32:
2210      ASSEMBLE_LOAD_INTEGER(LoadlW);
2211      break;
2212    case kS390_LoadWordS32:
2213      ASSEMBLE_LOAD_INTEGER(LoadW);
2214      break;
2215    case kS390_LoadReverse16:
2216      ASSEMBLE_LOAD_INTEGER(lrvh);
2217      break;
2218    case kS390_LoadReverse32:
2219      ASSEMBLE_LOAD_INTEGER(lrv);
2220      break;
2221    case kS390_LoadReverse64:
2222      ASSEMBLE_LOAD_INTEGER(lrvg);
2223      break;
2224    case kS390_LoadReverse16RR:
2225      __ lrvr(i.OutputRegister(), i.InputRegister(0));
2226      __ rll(i.OutputRegister(), i.OutputRegister(), Operand(16));
2227      break;
2228    case kS390_LoadReverse32RR:
2229      __ lrvr(i.OutputRegister(), i.InputRegister(0));
2230      break;
2231    case kS390_LoadReverse64RR:
2232      __ lrvgr(i.OutputRegister(), i.InputRegister(0));
2233      break;
2234#if V8_TARGET_ARCH_S390X
2235    case kS390_LoadWord64:
2236      ASSEMBLE_LOAD_INTEGER(lg);
2237      break;
2238#endif
2239    case kS390_LoadAndTestWord32: {
2240      ASSEMBLE_LOADANDTEST32(ltr, lt_z);
2241      break;
2242    }
2243    case kS390_LoadAndTestWord64: {
2244      ASSEMBLE_LOADANDTEST64(ltgr, ltg);
2245      break;
2246    }
2247    case kS390_LoadFloat32:
2248      ASSEMBLE_LOAD_FLOAT(LoadFloat32);
2249      break;
2250    case kS390_LoadDouble:
2251      ASSEMBLE_LOAD_FLOAT(LoadDouble);
2252      break;
2253    case kS390_StoreWord8:
2254      ASSEMBLE_STORE_INTEGER(StoreByte);
2255      break;
2256    case kS390_StoreWord16:
2257      ASSEMBLE_STORE_INTEGER(StoreHalfWord);
2258      break;
2259    case kS390_StoreWord32:
2260      ASSEMBLE_STORE_INTEGER(StoreW);
2261      break;
2262#if V8_TARGET_ARCH_S390X
2263    case kS390_StoreWord64:
2264      ASSEMBLE_STORE_INTEGER(StoreP);
2265      break;
2266#endif
2267    case kS390_StoreReverse16:
2268      ASSEMBLE_STORE_INTEGER(strvh);
2269      break;
2270    case kS390_StoreReverse32:
2271      ASSEMBLE_STORE_INTEGER(strv);
2272      break;
2273    case kS390_StoreReverse64:
2274      ASSEMBLE_STORE_INTEGER(strvg);
2275      break;
2276    case kS390_StoreFloat32:
2277      ASSEMBLE_STORE_FLOAT32();
2278      break;
2279    case kS390_StoreDouble:
2280      ASSEMBLE_STORE_DOUBLE();
2281      break;
2282    case kS390_Lay:
2283      __ lay(i.OutputRegister(), i.MemoryOperand());
2284      break;
2285    case kCheckedLoadInt8:
2286      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
2287#if V8_TARGET_ARCH_S390X
2288      __ lgbr(i.OutputRegister(), i.OutputRegister());
2289#else
2290      __ lbr(i.OutputRegister(), i.OutputRegister());
2291#endif
2292      break;
2293    case kCheckedLoadUint8:
2294      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
2295      break;
2296    case kCheckedLoadInt16:
2297      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadHalfWordP);
2298      break;
2299    case kCheckedLoadUint16:
2300      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
2301      break;
2302    case kCheckedLoadWord32:
2303      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlW);
2304      break;
2305    case kCheckedLoadWord64:
2306#if V8_TARGET_ARCH_S390X
2307      ASSEMBLE_CHECKED_LOAD_INTEGER(LoadP);
2308#else
2309      UNREACHABLE();
2310#endif
2311      break;
2312    case kCheckedLoadFloat32:
2313      ASSEMBLE_CHECKED_LOAD_FLOAT(LoadFloat32, 32);
2314      break;
2315    case kCheckedLoadFloat64:
2316      ASSEMBLE_CHECKED_LOAD_FLOAT(LoadDouble, 64);
2317      break;
2318    case kCheckedStoreWord8:
2319      ASSEMBLE_CHECKED_STORE_INTEGER(StoreByte);
2320      break;
2321    case kCheckedStoreWord16:
2322      ASSEMBLE_CHECKED_STORE_INTEGER(StoreHalfWord);
2323      break;
2324    case kCheckedStoreWord32:
2325      ASSEMBLE_CHECKED_STORE_INTEGER(StoreW);
2326      break;
2327    case kCheckedStoreWord64:
2328#if V8_TARGET_ARCH_S390X
2329      ASSEMBLE_CHECKED_STORE_INTEGER(StoreP);
2330#else
2331      UNREACHABLE();
2332#endif
2333      break;
2334    case kCheckedStoreFloat32:
2335      ASSEMBLE_CHECKED_STORE_FLOAT32();
2336      break;
2337    case kCheckedStoreFloat64:
2338      ASSEMBLE_CHECKED_STORE_DOUBLE();
2339      break;
2340    case kAtomicLoadInt8:
2341      __ LoadB(i.OutputRegister(), i.MemoryOperand());
2342      break;
2343    case kAtomicLoadUint8:
2344      __ LoadlB(i.OutputRegister(), i.MemoryOperand());
2345      break;
2346    case kAtomicLoadInt16:
2347      __ LoadHalfWordP(i.OutputRegister(), i.MemoryOperand());
2348      break;
2349    case kAtomicLoadUint16:
2350      __ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
2351      break;
2352    case kAtomicLoadWord32:
2353      __ LoadlW(i.OutputRegister(), i.MemoryOperand());
2354      break;
2355    case kAtomicStoreWord8:
2356      __ StoreByte(i.InputRegister(0), i.MemoryOperand(NULL, 1));
2357      break;
2358    case kAtomicStoreWord16:
2359      __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(NULL, 1));
2360      break;
2361    case kAtomicStoreWord32:
2362      __ StoreW(i.InputRegister(0), i.MemoryOperand(NULL, 1));
2363      break;
2364    default:
2365      UNREACHABLE();
2366      break;
2367  }
2368  return kSuccess;
2369}  // NOLINT(readability/fn_size)
2370
2371// Assembles branches after an instruction.
2372void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
2373  S390OperandConverter i(this, instr);
2374  Label* tlabel = branch->true_label;
2375  Label* flabel = branch->false_label;
2376  ArchOpcode op = instr->arch_opcode();
2377  FlagsCondition condition = branch->condition;
2378
2379  Condition cond = FlagsConditionToCondition(condition, op);
2380  if (op == kS390_CmpDouble) {
2381    // check for unordered if necessary
2382    // Branching to flabel/tlabel according to what's expected by tests
2383    if (cond == le || cond == eq || cond == lt) {
2384      __ bunordered(flabel);
2385    } else if (cond == gt || cond == ne || cond == ge) {
2386      __ bunordered(tlabel);
2387    }
2388  }
2389  __ b(cond, tlabel);
2390  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
2391}
2392
2393void CodeGenerator::AssembleArchJump(RpoNumber target) {
2394  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
2395}
2396
// Assembles a conditional trap check (used by WebAssembly, per the
// WASM_COMPILED frame type below). The trap call itself is emitted
// out-of-line so the fast path is just a conditional branch.
void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  class OutOfLineTrap final : public OutOfLineCode {
   public:
    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
        : OutOfLineCode(gen),
          frame_elided_(frame_elided),
          instr_(instr),
          gen_(gen) {}

    void Generate() final {
      S390OperandConverter i(gen_, instr_);

      // The trap id is encoded as the last input of the instruction.
      Builtins::Name trap_id =
          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
      bool old_has_frame = __ has_frame();
      if (frame_elided_) {
        // The surrounding code elided its frame; build one for the call and
        // restore the assembler's frame state afterwards.
        __ set_has_frame(true);
        __ EnterFrame(StackFrame::WASM_COMPILED);
      }
      GenerateCallToTrap(trap_id);
      if (frame_elided_) {
        __ set_has_frame(old_has_frame);
      }
    }

   private:
    void GenerateCallToTrap(Builtins::Name trap_id) {
      if (trap_id == Builtins::builtin_count) {
        // We cannot test calls to the runtime in cctest/test-run-wasm.
        // Therefore we emit a call to C here instead of a call to the runtime.
        // We use the context register as the scratch register, because we do
        // not have a context here.
        __ PrepareCallCFunction(0, 0, cp);
        __ CallCFunction(
            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
            0);
        __ LeaveFrame(StackFrame::WASM_COMPILED);
        __ Ret();
      } else {
        gen_->AssembleSourcePosition(instr_);
        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
                RelocInfo::CODE_TARGET);
        // Record an empty safepoint for the call site. The trap builtin is
        // not expected to return (see the bailout reason below).
        ReferenceMap* reference_map =
            new (gen_->zone()) ReferenceMap(gen_->zone());
        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
                              Safepoint::kNoLazyDeopt);
        if (FLAG_debug_code) {
          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
        }
      }
    }

    bool frame_elided_;
    Instruction* instr_;
    CodeGenerator* gen_;
  };
  bool frame_elided = !frame_access_state()->has_frame();
  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
  Label* tlabel = ool->entry();
  Label end;

  ArchOpcode op = instr->arch_opcode();
  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kS390_CmpDouble) {
    // check for unordered if necessary (NaN operand sets the FU bit)
    if (cond == le) {
      __ bunordered(&end);
      // Unnecessary for eq/lt since only FU bit will be set.
    } else if (cond == gt) {
      __ bunordered(tlabel);
      // Unnecessary for ne/ge since only FU bit will be set.
    }
  }
  __ b(cond, tlabel);
  __ bind(&end);
}
2474
2475// Assembles boolean materializations after an instruction.
2476void CodeGenerator::AssembleArchBoolean(Instruction* instr,
2477                                        FlagsCondition condition) {
2478  S390OperandConverter i(this, instr);
2479  ArchOpcode op = instr->arch_opcode();
2480  bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
2481
2482  // Overflow checked for add/sub only.
2483  DCHECK((condition != kOverflow && condition != kNotOverflow) ||
2484         (op == kS390_Add32 || kS390_Add64 || op == kS390_Sub32 ||
2485          op == kS390_Sub64));
2486
2487  // Materialize a full 32-bit 1 or 0 value. The result register is always the
2488  // last output of the instruction.
2489  DCHECK_NE(0u, instr->OutputCount());
2490  Register reg = i.OutputRegister(instr->OutputCount() - 1);
2491  Condition cond = FlagsConditionToCondition(condition, op);
2492  Label done;
2493  if (check_unordered) {
2494    __ LoadImmP(reg, (cond == eq || cond == le || cond == lt) ? Operand::Zero()
2495                                                              : Operand(1));
2496    __ bunordered(&done);
2497  }
2498  __ LoadImmP(reg, Operand::Zero());
2499  __ LoadImmP(kScratchReg, Operand(1));
2500  // locr is sufficient since reg's upper 32 is guarrantee to be 0
2501  __ locr(cond, reg, kScratchReg);
2502  __ bind(&done);
2503}
2504
2505void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
2506  S390OperandConverter i(this, instr);
2507  Register input = i.InputRegister(0);
2508  for (size_t index = 2; index < instr->InputCount(); index += 2) {
2509    __ Cmp32(input, Operand(i.InputInt32(index + 0)));
2510    __ beq(GetLabel(i.InputRpo(index + 1)));
2511  }
2512  AssembleArchJump(i.InputRpo(1));
2513}
2514
2515void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
2516  S390OperandConverter i(this, instr);
2517  Register input = i.InputRegister(0);
2518  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
2519  Label** cases = zone()->NewArray<Label*>(case_count);
2520  for (int32_t index = 0; index < case_count; ++index) {
2521    cases[index] = GetLabel(i.InputRpo(index + 2));
2522  }
2523  Label* const table = AddJumpTable(cases, case_count);
2524  __ CmpLogicalP(input, Operand(case_count));
2525  __ bge(GetLabel(i.InputRpo(1)));
2526  __ larl(kScratchReg, table);
2527  __ ShiftLeftP(r1, input, Operand(kPointerSizeLog2));
2528  __ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
2529  __ Jump(kScratchReg);
2530}
2531
2532CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
2533    int deoptimization_id, SourcePosition pos) {
2534  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
2535  DeoptimizeReason deoptimization_reason =
2536      GetDeoptimizationReason(deoptimization_id);
2537  Deoptimizer::BailoutType bailout_type =
2538      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
2539                                                   : Deoptimizer::EAGER;
2540  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
2541      isolate(), deoptimization_id, bailout_type);
2542  // TODO(turbofan): We should be able to generate better code by sharing the
2543  // actual final call site and just bl'ing to it here, similar to what we do
2544  // in the lithium backend.
2545  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
2546  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
2547  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
2548  return kSuccess;
2549}
2550
2551void CodeGenerator::FinishFrame(Frame* frame) {
2552  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
2553  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
2554
2555  // Save callee-saved Double registers.
2556  if (double_saves != 0) {
2557    frame->AlignSavedCalleeRegisterSlots();
2558    DCHECK(kNumCalleeSavedDoubles ==
2559           base::bits::CountPopulation32(double_saves));
2560    frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
2561                                            (kDoubleSize / kPointerSize));
2562  }
2563  // Save callee-saved registers.
2564  const RegList saves = descriptor->CalleeSavedRegisters();
2565  if (saves != 0) {
2566    // register save area does not include the fp or constant pool pointer.
2567    const int num_saves = kNumCalleeSaved - 1;
2568    DCHECK(num_saves == base::bits::CountPopulation32(saves));
2569    frame->AllocateSavedCalleeRegisterSlots(num_saves);
2570  }
2571}
2572
// Builds the stack frame on function entry: prologue (per calling
// convention), allocation of the remaining stack slots, and spilling of
// callee-saved registers.
void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
      // C frame: push return address and caller fp, then point fp at it.
      __ Push(r14, fp);
      __ LoadRR(fp, sp);
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
      if (descriptor->PushArgumentCount()) {
        __ Push(kJavaScriptCallArgCountRegister);
      }
    } else {
      StackFrame::Type type = info()->GetOutputStackFrameType();
      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
      // efficient initialization of the constant pool pointer register).
      __ StubPrologue(type);
    }
  }

  // Slots still needed beyond the fixed frame established by the prologue.
  int shrink_slots =
      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (shrink_slots > 0) {
    // Grow the frame by bumping sp downward.
    __ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
  }

  // Save callee-saved Double registers.
  if (double_saves != 0) {
    __ MultiPushDoubles(double_saves);
    DCHECK(kNumCalleeSavedDoubles ==
           base::bits::CountPopulation32(double_saves));
  }

  // Save callee-saved registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPush(saves);
    // register save area does not include the fp or constant pool pointer.
  }
}
2627
// Restores callee-saved registers, tears down the frame, and returns.
// |pop| holds the number of extra stack slots to drop, either as an
// immediate constant or in a register.
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(descriptor->StackParameterCount());

  // Restore registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore double registers.
  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (double_saves != 0) {
    __ MultiPopDoubles(double_saves);
  }

  S390OperandConverter g(this, nullptr);
  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
    // number of stack slot pops
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        // Reuse the previously emitted canonical return sequence.
        __ b(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
    // Dynamic pop count supplied in a register.
    __ Drop(g.ToRegister(pop));
  }
  __ Drop(pop_count);
  __ Ret();
}
2671
// Assembles a gap-resolver move from |source| to |destination|, covering
// register, stack-slot, and constant sources in both general-purpose and
// floating-point flavors.
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  S390OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Move(g.ToRegister(destination), src);
    } else {
      __ StoreP(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadP(g.ToRegister(destination), src);
    } else {
      // Slot-to-slot: bounce through the scratch register.
      Register temp = kScratchReg;
      __ LoadP(temp, src, r0);
      __ StoreP(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      // Materialize into the destination register, or into the scratch
      // register when the destination is a stack slot (stored below).
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
#if V8_TARGET_ARCH_S390X
          if (RelocInfo::IsWasmSizeReference(src.rmode())) {
#else
          if (RelocInfo::IsWasmReference(src.rmode())) {
#endif
            // Wasm references keep their reloc mode so they can be patched.
            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
          } else {
            __ Load(dst, Operand(src.ToInt32()));
          }
          break;
        case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
            __ mov(dst, Operand(src.ToInt64(), src.rmode()));
          } else {
            DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
            __ Load(dst, Operand(src.ToInt64()));
          }
#else
          __ mov(dst, Operand(src.ToInt64()));
#endif  // V8_TARGET_ARCH_S390X
          break;
        case Constant::kFloat32:
          // Float constants destined for a GPR become boxed heap numbers.
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            // Load from the root list rather than embedding the object.
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on S390.
          break;
      }
      if (destination->IsStackSlot()) {
        // Spill the materialized constant into the destination slot.
        __ StoreP(dst, g.ToMemOperand(destination), r0);
      }
    } else {
      // Float/double constant into an FP register or FP stack slot.
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
                                                        : src.ToFloat64();
      if (src.type() == Constant::kFloat32) {
        __ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
      } else {
        __ LoadDoubleLiteral(dst, value, kScratchReg);
      }

      if (destination->IsFPStackSlot()) {
        __ StoreDouble(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsFPStackSlot());
      // Pick the store width from the source representation.
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ StoreDouble(src, g.ToMemOperand(destination));
      } else {
        __ StoreFloat32(src, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsFPRegister()) {
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(g.ToDoubleRegister(destination), src);
      } else {
        __ LoadFloat32(g.ToDoubleRegister(destination), src);
      }
    } else {
      // FP slot-to-slot: bounce through the scratch double register.
      LocationOperand* op = LocationOperand::cast(source);
      DoubleRegister temp = kScratchDoubleReg;
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(temp, src);
        __ StoreDouble(temp, g.ToMemOperand(destination));
      } else {
        __ LoadFloat32(temp, src);
        __ StoreFloat32(temp, g.ToMemOperand(destination));
      }
    }
  } else {
    UNREACHABLE();
  }
}
2807
// Assembles a gap-resolver swap: exchanges the contents of |source| and
// |destination| using the scratch registers.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  S390OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ LoadRR(temp, src);
      __ LoadRR(src, dst);
      __ LoadRR(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ LoadRR(temp, src);
      __ LoadP(src, dst);
      __ StoreP(temp, dst);
    }
#if V8_TARGET_ARCH_S390X
  // On 64-bit, FP stack slots are swapped with the same pointer-sized
  // load/store sequence as general stack slots, so both cases share it.
  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
#else
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
#endif
    Register temp_0 = kScratchReg;
    Register temp_1 = r0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ LoadP(temp_0, src);
    __ LoadP(temp_1, dst);
    __ StoreP(temp_0, dst);
    __ StoreP(temp_1, src);
  } else if (source->IsFPRegister()) {
    DoubleRegister temp = kScratchDoubleReg;
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ ldr(temp, src);
      __ ldr(src, dst);
      __ ldr(dst, temp);
    } else {
      DCHECK(destination->IsFPStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ ldr(temp, src);
      __ LoadDouble(src, dst);
      __ StoreDouble(temp, dst);
    }
#if !V8_TARGET_ARCH_S390X
  // On 32-bit, FP slot-slot swaps need the double-width FP path.
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    DoubleRegister temp_0 = kScratchDoubleReg;
    DoubleRegister temp_1 = d0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    // TODO(joransiu): MVC opportunity
    __ LoadDouble(temp_0, src);
    __ LoadDouble(temp_1, dst);
    __ StoreDouble(temp_0, dst);
    __ StoreDouble(temp_1, src);
#endif
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
2876
2877void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
2878  for (size_t index = 0; index < target_count; ++index) {
2879    __ emit_label_addr(targets[index]);
2880  }
2881}
2882
2883void CodeGenerator::EnsureSpaceForLazyDeopt() {
2884  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
2885    return;
2886  }
2887
2888  int space_needed = Deoptimizer::patch_size();
2889  // Ensure that we have enough space after the previous lazy-bailout
2890  // instruction for patching the code here.
2891  int current_pc = masm()->pc_offset();
2892  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
2893    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
2894    DCHECK_EQ(0, padding_size % 2);
2895    while (padding_size > 0) {
2896      __ nop();
2897      padding_size -= 2;
2898    }
2899  }
2900}
2901
2902#undef __
2903
2904}  // namespace compiler
2905}  // namespace internal
2906}  // namespace v8
2907