1// Copyright 2015 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/base/adapters.h"
6#include "src/compiler/instruction-selector-impl.h"
7#include "src/compiler/node-matchers.h"
8#include "src/compiler/node-properties.h"
9#include "src/s390/frames-s390.h"
10
11namespace v8 {
12namespace internal {
13namespace compiler {
14
// Operand-selection flags for the s390 instruction selector.  The low bits
// describe which immediate encodings an operand may use; the high bits
// describe which instruction formats (register/register, register/memory,
// register/immediate, and their 3-operand "distinct ops" variants) may be
// emitted.  Values are combined into a bit mask via base::Flags below.
enum class OperandMode : uint32_t {
  kNone = 0u,
  // Immediate mode
  kShift32Imm = 1u << 0,
  kShift64Imm = 1u << 1,
  kInt32Imm = 1u << 2,
  kInt32Imm_Negate = 1u << 3,
  kUint32Imm = 1u << 4,
  kInt20Imm = 1u << 5,
  kUint12Imm = 1u << 6,
  // Instr format
  kAllowRRR = 1u << 7,
  kAllowRM = 1u << 8,
  kAllowRI = 1u << 9,
  kAllowRRI = 1u << 10,
  kAllowRRM = 1u << 11,
  // Useful combination
  kAllowImmediate = kAllowRI | kAllowRRI,
  kAllowMemoryOperand = kAllowRM | kAllowRRM,
  kAllowDistinctOps = kAllowRRR | kAllowRRI | kAllowRRM,
  kBitWiseCommonMode = kAllowRI,
  kArithmeticCommonMode = kAllowRM | kAllowRI
};
38
typedef base::Flags<OperandMode, uint32_t> OperandModes;
DEFINE_OPERATORS_FOR_FLAGS(OperandModes);
// Mask covering all of the immediate-encoding bits of OperandMode (but not
// kUint12Imm); can be used to strip the instruction-format bits from a
// combined OperandModes value.
OperandModes immediateModeMask =
    OperandMode::kShift32Imm | OperandMode::kShift64Imm |
    OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
    OperandMode::kUint32Imm | OperandMode::kInt20Imm;
45
// Operand modes for bitwise AND/OR/XOR: register/immediate (uint32) or
// register/memory; the 3-operand RRR form is only allowed when the
// DISTINCT_OPS facility is available on the target CPU.
#define AndOperandMode                                              \
  ((OperandMode::kBitWiseCommonMode | OperandMode::kUint32Imm |     \
    OperandMode::kAllowRM | (CpuFeatures::IsSupported(DISTINCT_OPS) \
                                 ? OperandMode::kAllowRRR           \
                                 : OperandMode::kBitWiseCommonMode)))

#define OrOperandMode AndOperandMode
#define XorOperandMode AndOperandMode

// Operand modes for shifts: immediate shift amounts up to 63, with the RRR
// form gated on the DISTINCT_OPS facility.
#define ShiftOperandMode                                         \
  ((OperandMode::kBitWiseCommonMode | OperandMode::kShift64Imm | \
    (CpuFeatures::IsSupported(DISTINCT_OPS)                      \
         ? OperandMode::kAllowRRR                                \
         : OperandMode::kBitWiseCommonMode)))

// Operand modes for add/sub: memory or signed-32-bit-immediate right operand
// (negated for sub); 3-operand forms gated on DISTINCT_OPS.
#define AddOperandMode                                            \
  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
    (CpuFeatures::IsSupported(DISTINCT_OPS)                       \
         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI)      \
         : OperandMode::kArithmeticCommonMode)))
#define SubOperandMode                                                   \
  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm_Negate | \
    (CpuFeatures::IsSupported(DISTINCT_OPS)                              \
         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI)             \
         : OperandMode::kArithmeticCommonMode)))
// Operand modes for multiply: no distinct-ops variant exists.
#define MulOperandMode \
  (OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm)
73
74// Adds S390-specific methods for generating operands.
75class S390OperandGenerator final : public OperandGenerator {
76 public:
77  explicit S390OperandGenerator(InstructionSelector* selector)
78      : OperandGenerator(selector) {}
79
80  InstructionOperand UseOperand(Node* node, OperandModes mode) {
81    if (CanBeImmediate(node, mode)) {
82      return UseImmediate(node);
83    }
84    return UseRegister(node);
85  }
86
87  InstructionOperand UseAnyExceptImmediate(Node* node) {
88    if (NodeProperties::IsConstant(node))
89      return UseRegister(node);
90    else
91      return Use(node);
92  }
93
94  int64_t GetImmediate(Node* node) {
95    if (node->opcode() == IrOpcode::kInt32Constant)
96      return OpParameter<int32_t>(node);
97    else if (node->opcode() == IrOpcode::kInt64Constant)
98      return OpParameter<int64_t>(node);
99    else
100      UNIMPLEMENTED();
101    return 0L;
102  }
103
104  bool CanBeImmediate(Node* node, OperandModes mode) {
105    int64_t value;
106    if (node->opcode() == IrOpcode::kInt32Constant)
107      value = OpParameter<int32_t>(node);
108    else if (node->opcode() == IrOpcode::kInt64Constant)
109      value = OpParameter<int64_t>(node);
110    else
111      return false;
112    return CanBeImmediate(value, mode);
113  }
114
115  bool CanBeImmediate(int64_t value, OperandModes mode) {
116    if (mode & OperandMode::kShift32Imm)
117      return 0 <= value && value < 32;
118    else if (mode & OperandMode::kShift64Imm)
119      return 0 <= value && value < 64;
120    else if (mode & OperandMode::kInt32Imm)
121      return is_int32(value);
122    else if (mode & OperandMode::kInt32Imm_Negate)
123      return is_int32(-value);
124    else if (mode & OperandMode::kUint32Imm)
125      return is_uint32(value);
126    else if (mode & OperandMode::kInt20Imm)
127      return is_int20(value);
128    else if (mode & OperandMode::kUint12Imm)
129      return is_uint12(value);
130    else
131      return false;
132  }
133
134  bool CanBeMemoryOperand(InstructionCode opcode, Node* user, Node* input,
135                          int effect_level) {
136    if (input->opcode() != IrOpcode::kLoad ||
137        !selector()->CanCover(user, input)) {
138      return false;
139    }
140
141    if (effect_level != selector()->GetEffectLevel(input)) {
142      return false;
143    }
144
145    MachineRepresentation rep =
146        LoadRepresentationOf(input->op()).representation();
147    switch (opcode) {
148      case kS390_Cmp64:
149      case kS390_LoadAndTestWord64:
150        return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
151      case kS390_LoadAndTestWord32:
152      case kS390_Cmp32:
153        return rep == MachineRepresentation::kWord32;
154      default:
155        break;
156    }
157    return false;
158  }
159
160  AddressingMode GenerateMemoryOperandInputs(Node* index, Node* base,
161                                             Node* displacement,
162                                             DisplacementMode displacement_mode,
163                                             InstructionOperand inputs[],
164                                             size_t* input_count) {
165    AddressingMode mode = kMode_MRI;
166    if (base != nullptr) {
167      inputs[(*input_count)++] = UseRegister(base);
168      if (index != nullptr) {
169        inputs[(*input_count)++] = UseRegister(index);
170        if (displacement != nullptr) {
171          inputs[(*input_count)++] = displacement_mode
172                                         ? UseNegatedImmediate(displacement)
173                                         : UseImmediate(displacement);
174          mode = kMode_MRRI;
175        } else {
176          mode = kMode_MRR;
177        }
178      } else {
179        if (displacement == nullptr) {
180          mode = kMode_MR;
181        } else {
182          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
183                                         ? UseNegatedImmediate(displacement)
184                                         : UseImmediate(displacement);
185          mode = kMode_MRI;
186        }
187      }
188    } else {
189      DCHECK_NOT_NULL(index);
190      inputs[(*input_count)++] = UseRegister(index);
191      if (displacement != nullptr) {
192        inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
193                                       ? UseNegatedImmediate(displacement)
194                                       : UseImmediate(displacement);
195        mode = kMode_MRI;
196      } else {
197        mode = kMode_MR;
198      }
199    }
200    return mode;
201  }
202
203  AddressingMode GetEffectiveAddressMemoryOperand(
204      Node* operand, InstructionOperand inputs[], size_t* input_count,
205      OperandModes immediate_mode = OperandMode::kInt20Imm) {
206#if V8_TARGET_ARCH_S390X
207    BaseWithIndexAndDisplacement64Matcher m(operand,
208                                            AddressOption::kAllowInputSwap);
209#else
210    BaseWithIndexAndDisplacement32Matcher m(operand,
211                                            AddressOption::kAllowInputSwap);
212#endif
213    DCHECK(m.matches());
214    if ((m.displacement() == nullptr ||
215         CanBeImmediate(m.displacement(), immediate_mode))) {
216      DCHECK(m.scale() == 0);
217      return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
218                                         m.displacement_mode(), inputs,
219                                         input_count);
220    } else {
221      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
222      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
223      return kMode_MRR;
224    }
225  }
226
227  bool CanBeBetterLeftOperand(Node* node) const {
228    return !selector()->IsLive(node);
229  }
230
231  MachineRepresentation GetRepresentation(Node* node) {
232    return sequence()->GetRepresentation(selector()->GetVirtualRegister(node));
233  }
234
235  bool Is64BitOperand(Node* node) {
236    return MachineRepresentation::kWord64 == GetRepresentation(node);
237  }
238};
239
240namespace {
241
242bool S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode) {
243  switch (opcode) {
244    case kS390_CmpFloat:
245    case kS390_CmpDouble:
246      return true;
247    default:
248      return false;
249  }
250}
251
252bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
253  ArchOpcode opcode = ArchOpcodeField::decode(op);
254  return S390OpcodeOnlySupport12BitDisp(opcode);
255}
256
// Selects the widest displacement immediate mode supported by |op|.
#define OpcodeImmMode(op)                                       \
  (S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
                                      : OperandMode::kInt20Imm)
260
// Maps a Load node's machine representation to the matching s390 load opcode.
// Tagged values are loaded as 32-bit words on 31-bit builds and as 64-bit
// words on 64-bit builds (see the #if blocks).  Unsupported representations
// are unreachable.
ArchOpcode SelectLoadOpcode(Node* node) {
  NodeMatcher m(node);
  DCHECK(m.IsLoad());
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kS390_LoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kS390_LoadDouble;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
      break;
#if !V8_TARGET_ARCH_S390X
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
#endif
    case MachineRepresentation::kWord32:
      opcode = kS390_LoadWordU32;
      break;
#if V8_TARGET_ARCH_S390X
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kS390_LoadWord64;
      break;
#else
    case MachineRepresentation::kWord64:  // Fall through.
#endif
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kSimd1x4:  // Fall through.
    case MachineRepresentation::kSimd1x8:  // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
    default:
      UNREACHABLE();
  }
  return opcode;
}
308
309bool AutoZeroExtendsWord32ToWord64(Node* node) {
310#if !V8_TARGET_ARCH_S390X
311  return true;
312#else
313  switch (node->opcode()) {
314    case IrOpcode::kInt32Div:
315    case IrOpcode::kUint32Div:
316    case IrOpcode::kInt32MulHigh:
317    case IrOpcode::kUint32MulHigh:
318    case IrOpcode::kInt32Mod:
319    case IrOpcode::kUint32Mod:
320    case IrOpcode::kWord32Clz:
321    case IrOpcode::kWord32Popcnt:
322      return true;
323    default:
324      return false;
325  }
326  return false;
327#endif
328}
329
// Returns true if |node|'s result is already known to have its upper 32 bits
// cleared, so a ChangeUint32ToUint64 of it would be a no-op.  This is a
// superset of AutoZeroExtendsWord32ToWord64: it also includes ops whose
// selected instruction pattern guarantees a zero-extended result.  Trivially
// true on 31-bit builds.
bool ZeroExtendsWord32ToWord64(Node* node) {
#if !V8_TARGET_ARCH_S390X
  return true;
#else
  switch (node->opcode()) {
    case IrOpcode::kInt32Add:
    case IrOpcode::kInt32Sub:
    case IrOpcode::kWord32And:
    case IrOpcode::kWord32Or:
    case IrOpcode::kWord32Xor:
    case IrOpcode::kWord32Shl:
    case IrOpcode::kWord32Shr:
    case IrOpcode::kWord32Sar:
    case IrOpcode::kInt32Mul:
    case IrOpcode::kWord32Ror:
    case IrOpcode::kInt32Div:
    case IrOpcode::kUint32Div:
    case IrOpcode::kInt32MulHigh:
    case IrOpcode::kInt32Mod:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kWord32Popcnt:
      return true;
    // TODO(john.yan): consider the following case to be valid
    // case IrOpcode::kWord32Equal:
    // case IrOpcode::kInt32LessThan:
    // case IrOpcode::kInt32LessThanOrEqual:
    // case IrOpcode::kUint32LessThan:
    // case IrOpcode::kUint32LessThanOrEqual:
    // case IrOpcode::kUint32MulHigh:
    //   // These 32-bit operations implicitly zero-extend to 64-bit on x64, so
    //   the
    //   // zero-extension is a no-op.
    //   return true;
    // case IrOpcode::kProjection: {
    //   Node* const value = node->InputAt(0);
    //   switch (value->opcode()) {
    //     case IrOpcode::kInt32AddWithOverflow:
    //     case IrOpcode::kInt32SubWithOverflow:
    //     case IrOpcode::kInt32MulWithOverflow:
    //       return true;
    //     default:
    //       return false;
    //   }
    // }
    case IrOpcode::kLoad: {
      // 32-bit loads are selected as kS390_LoadWordU32, which zero-extends.
      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
      switch (load_rep.representation()) {
        case MachineRepresentation::kWord32:
          return true;
        default:
          return false;
      }
    }
    default:
      return false;
  }
#endif
}
388
389void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
390  S390OperandGenerator g(selector);
391  selector->Emit(opcode, g.DefineAsRegister(node),
392                 g.UseRegister(node->InputAt(0)));
393}
394
395void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
396  S390OperandGenerator g(selector);
397  selector->Emit(opcode, g.DefineAsRegister(node),
398                 g.UseRegister(node->InputAt(0)),
399                 g.UseRegister(node->InputAt(1)));
400}
401
402#if V8_TARGET_ARCH_S390X
403void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
404              OperandModes operand_mode) {
405  S390OperandGenerator g(selector);
406  selector->Emit(opcode, g.DefineAsRegister(node),
407                 g.UseRegister(node->InputAt(0)),
408                 g.UseOperand(node->InputAt(1), operand_mode));
409}
410
411void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
412                            Node* node) {
413  S390OperandGenerator g(selector);
414  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
415  InstructionOperand outputs[2];
416  size_t output_count = 0;
417  outputs[output_count++] = g.DefineAsRegister(node);
418
419  Node* success_output = NodeProperties::FindProjection(node, 1);
420  if (success_output) {
421    outputs[output_count++] = g.DefineAsRegister(success_output);
422  }
423
424  selector->Emit(opcode, output_count, outputs, 1, inputs);
425}
426#endif
427
// Shared routine for multiple binary operations.  Assembles the inputs
// (register/register or register/immediate), appends the continuation's
// extra inputs/outputs, and emits the combined instruction.  The outputs
// depend on the continuation kind: set-continuations get an extra boolean
// result register; deopt-continuations force a same-as-first output so the
// deopt inputs are not clobbered.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
                InstructionCode opcode, OperandModes operand_mode,
                FlagsContinuation* cont) {
  S390OperandGenerator g(selector);
  Matcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov rax, [rbp-0x10]
    //   add rax, [rbp-0x10]
    //   jo label
    InstructionOperand const input = g.UseRegister(left);
    inputs[input_count++] = input;
    inputs[input_count++] = input;
  } else if (g.CanBeImmediate(right, operand_mode)) {
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    // For commutative ops, prefer a dying node as the (clobbered) left
    // operand.
    if (node->op()->HasProperty(Operator::kCommutative) &&
        g.CanBeBetterLeftOperand(right)) {
      std::swap(left, right);
    }
    inputs[input_count++] = g.UseRegister(left);
    inputs[input_count++] = g.UseRegister(right);
  }

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }
  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->kind(), cont->reason(), cont->frame_state());
  } else if (cont->IsTrap()) {
    inputs[input_count++] = g.UseImmediate(cont->trap_id());
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
499
500// Shared routine for multiple binary operations.
501template <typename Matcher>
502void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
503                OperandModes operand_mode) {
504  FlagsContinuation cont;
505  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
506}
507
// Shared routine for 32-bit binary operations.  Chooses the instruction
// format for the right operand (register, immediate, or folded memory
// operand) based on |operand_mode|, appends a zero-extension flag input
// consumed by the code generator, and emits the instruction with the
// continuation's extra inputs/outputs.
void VisitBin32op(InstructionSelector* selector, Node* node,
                  InstructionCode opcode, OperandModes operand_mode,
                  FlagsContinuation* cont) {
  S390OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  InstructionOperand inputs[8];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  // match left of TruncateInt64ToInt32
  if (m.left().IsTruncateInt64ToInt32() && selector->CanCover(node, left)) {
    left = left->InputAt(0);
  }
  // match right of TruncateInt64ToInt32
  if (m.right().IsTruncateInt64ToInt32() && selector->CanCover(node, right)) {
    right = right->InputAt(0);
  }

#if V8_TARGET_ARCH_S390X
  // Prefer an already-zero-extended or dying node on the left (the side that
  // gets clobbered) when the operation is commutative and the right operand
  // cannot be encoded as an immediate.
  if ((ZeroExtendsWord32ToWord64(right) || g.CanBeBetterLeftOperand(right)) &&
      node->op()->HasProperty(Operator::kCommutative) &&
      !g.CanBeImmediate(right, operand_mode)) {
    std::swap(left, right);
  }
#else
  if (node->op()->HasProperty(Operator::kCommutative) &&
      !g.CanBeImmediate(right, operand_mode) &&
      (g.CanBeBetterLeftOperand(right))) {
    std::swap(left, right);
  }
#endif

  // left is always register
  InstructionOperand const left_input = g.UseRegister(left);
  inputs[input_count++] = left_input;

  // TODO(turbofan): match complex addressing modes.
  if (left == right) {
    // If both inputs refer to the same operand, enforce allocating a register
    // for both of them to ensure that we don't end up generating code like
    // this:
    //
    //   mov rax, [rbp-0x10]
    //   add rax, [rbp-0x10]
    //   jo label
    inputs[input_count++] = left_input;
    // Can only be RR or RRR
    operand_mode &= OperandMode::kAllowRRR;
  } else if ((operand_mode & OperandMode::kAllowImmediate) &&
             g.CanBeImmediate(right, operand_mode)) {
    inputs[input_count++] = g.UseImmediate(right);
    // Can only be RI or RRI
    operand_mode &= OperandMode::kAllowImmediate;
  } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
    NodeMatcher mright(right);
    // Only fold a covered 32-bit (zero-extending) load as a memory operand.
    if (mright.IsLoad() && selector->CanCover(node, right) &&
        SelectLoadOpcode(right) == kS390_LoadWordU32) {
      AddressingMode mode =
          g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
      opcode |= AddressingModeField::encode(mode);
      operand_mode &= ~OperandMode::kAllowImmediate;
      if (operand_mode & OperandMode::kAllowRM)
        operand_mode &= ~OperandMode::kAllowDistinctOps;
    } else if (operand_mode & OperandMode::kAllowRM) {
      DCHECK(!(operand_mode & OperandMode::kAllowRRM));
      inputs[input_count++] = g.Use(right);
      // Can not be Immediate
      operand_mode &=
          ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
    } else if (operand_mode & OperandMode::kAllowRRM) {
      DCHECK(!(operand_mode & OperandMode::kAllowRM));
      inputs[input_count++] = g.Use(right);
      // Can not be Immediate
      operand_mode &= ~OperandMode::kAllowImmediate;
    } else {
      UNREACHABLE();
    }
  } else {
    inputs[input_count++] = g.UseRegister(right);
    // Can only be RR or RRR
    operand_mode &= OperandMode::kAllowRRR;
  }

  // An explicit zero-extension is needed when the instruction does not
  // auto-zero-extend and the left operand is not already zero-extended.
  bool doZeroExt =
      AutoZeroExtendsWord32ToWord64(node) || !ZeroExtendsWord32ToWord64(left);

  // Extra immediate input telling the code generator whether to emit an
  // explicit zero-extension of the result.
  inputs[input_count++] =
      g.TempImmediate(doZeroExt && (!AutoZeroExtendsWord32ToWord64(node)));

  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  if (doZeroExt && (operand_mode & OperandMode::kAllowDistinctOps) &&
      // If we can deoptimize as a result of the binop, we need to make sure
      // that
      // the deopt inputs are not overwritten by the binop result. One way
      // to achieve that is to declare the output register as same-as-first.
      !cont->IsDeoptimize()) {
    outputs[output_count++] = g.DefineAsRegister(node);
  } else {
    outputs[output_count++] = g.DefineSameAsFirst(node);
  }

  if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  opcode = cont->Encode(opcode);

  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->kind(), cont->reason(), cont->frame_state());
  } else if (cont->IsTrap()) {
    inputs[input_count++] = g.UseImmediate(cont->trap_id());
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
637
638void VisitBin32op(InstructionSelector* selector, Node* node, ArchOpcode opcode,
639                  OperandModes operand_mode) {
640  FlagsContinuation cont;
641  VisitBin32op(selector, node, opcode, operand_mode, &cont);
642}
643
644}  // namespace
645
646void InstructionSelector::VisitLoad(Node* node) {
647  S390OperandGenerator g(this);
648  ArchOpcode opcode = SelectLoadOpcode(node);
649  InstructionOperand outputs[1];
650  outputs[0] = g.DefineAsRegister(node);
651  InstructionOperand inputs[3];
652  size_t input_count = 0;
653  AddressingMode mode =
654      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
655  InstructionCode code = opcode | AddressingModeField::encode(mode);
656  Emit(code, 1, outputs, input_count, inputs);
657}
658
// Protected (trap-on-OOB) loads are not yet implemented for s390.
void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
663
// Selects an s390 store for |node|.  Stores that need a write barrier are
// lowered to kArchStoreWithWriteBarrier with unique registers (the barrier's
// out-of-line code reuses the operands); plain stores pick an opcode from the
// representation and may fold a byte-reversal of the stored value.
void InstructionSelector::VisitStore(Node* node) {
  S390OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* offset = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (write_barrier_kind != kNoWriteBarrier) {
    DCHECK(CanBeTaggedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
    // for the store itself, so we must check compatibility with both.
    if (g.CanBeImmediate(offset, OperandMode::kInt20Imm)) {
      inputs[input_count++] = g.UseImmediate(offset);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(offset);
      addressing_mode = kMode_MRR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    NodeMatcher m(value);
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kS390_StoreFloat32;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kS390_StoreDouble;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kS390_StoreWord8;
        break;
      case MachineRepresentation::kWord16:
        opcode = kS390_StoreWord16;
        break;
#if !V8_TARGET_ARCH_S390X
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:  // Fall through.
#endif
      case MachineRepresentation::kWord32:
        opcode = kS390_StoreWord32;
        // Fold a Word32ReverseBytes of the value into a reversing store.
        if (m.IsWord32ReverseBytes()) {
          opcode = kS390_StoreReverse32;
          value = value->InputAt(0);
        }
        break;
#if V8_TARGET_ARCH_S390X
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kS390_StoreWord64;
        // Fold a Word64ReverseBytes of the value into a reversing store.
        if (m.IsWord64ReverseBytes()) {
          opcode = kS390_StoreReverse64;
          value = value->InputAt(0);
        }
        break;
#else
      case MachineRepresentation::kWord64:  // Fall through.
#endif
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kSimd1x4:  // Fall through.
      case MachineRepresentation::kSimd1x8:  // Fall through.
      case MachineRepresentation::kSimd1x16:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
        return;
    }
    InstructionOperand inputs[4];
    size_t input_count = 0;
    AddressingMode addressing_mode =
        g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
    InstructionCode code =
        opcode | AddressingModeField::encode(addressing_mode);
    InstructionOperand value_operand = g.UseRegister(value);
    inputs[input_count++] = value_operand;
    Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
         inputs);
  }
}
774
// Protected (trap-on-OOB) stores are not yet implemented for s390.
void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}
779
// Architecture supports unaligned access, therefore VisitLoad is used instead
// and this visitor must never be reached.
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
782
// Architecture supports unaligned access, therefore VisitStore is used instead
// and this visitor must never be reached.
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
785
// Selects a bounds-checked load: inputs are base, offset and length; the
// opcode is picked from the checked-load representation.  Only MRR
// addressing is used; the length may be encoded as a uint32 immediate.
void InstructionSelector::VisitCheckedLoad(Node* node) {
  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
  S390OperandGenerator g(this);
  Node* const base = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedLoadWord32;
      break;
#if V8_TARGET_ARCH_S390X
    case MachineRepresentation::kWord64:
      opcode = kCheckedLoadWord64;
      break;
#endif
    case MachineRepresentation::kFloat32:
      opcode = kCheckedLoadFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedLoadFloat64;
      break;
    case MachineRepresentation::kBit:     // Fall through.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
#if !V8_TARGET_ARCH_S390X
    case MachineRepresentation::kWord64:  // Fall through.
#endif
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kSimd1x4:  // Fall through.
    case MachineRepresentation::kSimd1x8:  // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  AddressingMode addressingMode = kMode_MRR;
  Emit(opcode | AddressingModeField::encode(addressingMode),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
       g.UseOperand(length, OperandMode::kUint32Imm));
}
834
// Selects a bounds-checked store: inputs are base, offset, length and value;
// the opcode is picked from the checked-store representation.  Mirrors
// VisitCheckedLoad's operand handling but produces no output.
void InstructionSelector::VisitCheckedStore(Node* node) {
  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
  S390OperandGenerator g(this);
  Node* const base = node->InputAt(0);
  Node* const offset = node->InputAt(1);
  Node* const length = node->InputAt(2);
  Node* const value = node->InputAt(3);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kCheckedStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kCheckedStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kCheckedStoreWord32;
      break;
#if V8_TARGET_ARCH_S390X
    case MachineRepresentation::kWord64:
      opcode = kCheckedStoreWord64;
      break;
#endif
    case MachineRepresentation::kFloat32:
      opcode = kCheckedStoreFloat32;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kCheckedStoreFloat64;
      break;
    case MachineRepresentation::kBit:     // Fall through.
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
#if !V8_TARGET_ARCH_S390X
    case MachineRepresentation::kWord64:  // Fall through.
#endif
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kSimd1x4:  // Fall through.
    case MachineRepresentation::kSimd1x8:  // Fall through.
    case MachineRepresentation::kSimd1x16:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }
  AddressingMode addressingMode = kMode_MRR;
  Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
       g.UseRegister(base), g.UseRegister(offset),
       g.UseOperand(length, OperandMode::kUint32Imm), g.UseRegister(value));
}
884
#if 0
// NOTE(review): compiled out (#if 0) — the 32-bit contiguous-mask matcher is
// currently unused by this selector (only the 64-bit variant below is
// called). Kept for reference; confirm before deleting.
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
  int mask_width = base::bits::CountPopulation32(value);
  int mask_msb = base::bits::CountLeadingZeros32(value);
  int mask_lsb = base::bits::CountTrailingZeros32(value);
  // Contiguous iff leading zeros + set bits + trailing zeros cover all 32.
  if ((mask_width == 0) || (mask_msb + mask_width + mask_lsb != 32))
    return false;
  *mb = mask_lsb + mask_width - 1;
  *me = mask_lsb;
  return true;
}
#endif
897
#if V8_TARGET_ARCH_S390X
// Returns true when |value| is one contiguous run of set bits. On success,
// *mb receives the bit index (LSB = bit 0) of the run's highest set bit and
// *me the index of its lowest set bit.
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
  int ones = base::bits::CountPopulation64(value);
  int leading = base::bits::CountLeadingZeros64(value);
  int trailing = base::bits::CountTrailingZeros64(value);
  // Contiguous iff the leading zeros, the set bits, and the trailing zeros
  // account for all 64 bit positions (and the run is non-empty).
  if (ones == 0 || leading + ones + trailing != 64) return false;
  *mb = trailing + ones - 1;
  *me = trailing;
  return true;
}
#endif
910
// 32-bit bitwise AND; operand forms governed by AndOperandMode.
void InstructionSelector::VisitWord32And(Node* node) {
  VisitBin32op(this, node, kS390_And32, AndOperandMode);
}

#if V8_TARGET_ARCH_S390X
// 64-bit AND. When the right operand is a contiguous bit mask, the AND
// (optionally fused with a shift feeding it) is selected as a single
// rotate-and-clear instruction; otherwise falls back to the generic binop.
void InstructionSelector::VisitWord64And(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  int mb = 0;
  int me = 0;
  if (m.right().HasValue() && IsContiguousMask64(m.right().Value(), &mb, &me)) {
    int sh = 0;
    Node* left = m.left().node();
    // Fold a left/right shift into the rotate, but only if this AND is the
    // shift's sole user (CanCover).
    if ((m.left().IsWord64Shr() || m.left().IsWord64Shl()) &&
        CanCover(node, left)) {
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().IsInRange(0, 63)) {
        left = mleft.left().node();
        sh = mleft.right().Value();
        if (m.left().IsWord64Shr()) {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (mb > 63 - sh) mb = 63 - sh;
          // A right shift by sh is a left rotate by (64 - sh).
          sh = (64 - sh) & 0x3f;
        } else {
          // Adjust the mask such that it doesn't include any rotated bits.
          if (me < sh) me = sh;
        }
      }
    }
    if (mb >= me) {
      bool match = false;
      ArchOpcode opcode;
      int mask;
      if (me == 0) {
        // Mask extends down to bit 0: rotate, then clear the high bits.
        match = true;
        opcode = kS390_RotLeftAndClearLeft64;
        mask = mb;
      } else if (mb == 63) {
        // Mask extends up to bit 63: rotate, then clear the low bits.
        match = true;
        opcode = kS390_RotLeftAndClearRight64;
        mask = me;
      } else if (sh && me <= sh && m.left().IsWord64Shl()) {
        // Interior mask; only valid when a left shift was folded in.
        match = true;
        opcode = kS390_RotLeftAndClear64;
        mask = mb;
      }
      if (match) {
        Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
             g.TempImmediate(sh), g.TempImmediate(mask));
        return;
      }
    }
  }
  VisitBinop<Int64BinopMatcher>(this, node, kS390_And64,
                                OperandMode::kUint32Imm);
}
#endif
968
// 32-bit bitwise OR; operand forms governed by OrOperandMode.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBin32op(this, node, kS390_Or32, OrOperandMode);
}
972
#if V8_TARGET_ARCH_S390X
// 64-bit bitwise OR; the right operand may be a uint32 immediate.
// (The previously constructed Int64BinopMatcher local was unused and has
// been removed.)
void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64,
                                OperandMode::kUint32Imm);
}
#endif
980
// 32-bit bitwise XOR; operand forms governed by XorOperandMode.
void InstructionSelector::VisitWord32Xor(Node* node) {
  VisitBin32op(this, node, kS390_Xor32, XorOperandMode);
}

#if V8_TARGET_ARCH_S390X
// 64-bit bitwise XOR; the right operand may be a uint32 immediate.
void InstructionSelector::VisitWord64Xor(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64,
                                OperandMode::kUint32Imm);
}
#endif

// 32-bit shift left; operand forms governed by ShiftOperandMode.
void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitBin32op(this, node, kS390_ShiftLeft32, ShiftOperandMode);
}
995
#if V8_TARGET_ARCH_S390X
// 64-bit shift left. An (x & mask) << sh whose shifted mask stays contiguous
// is selected as a single rotate-and-clear instruction; otherwise a plain
// shift is emitted.
void InstructionSelector::VisitWord64Shl(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(mbrandy): eliminate left sign extension if right >= 32
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask64(mleft.right().Value() << sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (me < sh) me = sh;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          // Mask extends down to bit 0: rotate, then clear the high bits.
          match = true;
          opcode = kS390_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          // Mask extends up to bit 63: rotate, then clear the low bits.
          match = true;
          opcode = kS390_RotLeftAndClearRight64;
          mask = me;
        } else if (sh && me <= sh) {
          // Interior mask cleared on both sides.
          match = true;
          opcode = kS390_RotLeftAndClear64;
          mask = mb;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, kS390_ShiftLeft64, node, OperandMode::kShift64Imm);
}
#endif

// 32-bit logical shift right; operand forms governed by ShiftOperandMode.
void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitBin32op(this, node, kS390_ShiftRight32, ShiftOperandMode);
}
1043
#if V8_TARGET_ARCH_S390X
// 64-bit logical shift right. An (x & mask) >> sh whose shifted mask stays
// contiguous is selected as a rotate-and-clear; otherwise a plain shift.
void InstructionSelector::VisitWord64Shr(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
    Int64BinopMatcher mleft(m.left().node());
    int sh = m.right().Value();
    int mb;
    int me;
    if (mleft.right().HasValue() &&
        IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
      // Adjust the mask such that it doesn't include any rotated bits.
      if (mb > 63 - sh) mb = 63 - sh;
      // A right shift by sh is a left rotate by (64 - sh).
      sh = (64 - sh) & 0x3f;
      if (mb >= me) {
        bool match = false;
        ArchOpcode opcode;
        int mask;
        if (me == 0) {
          // Mask extends down to bit 0: rotate, then clear the high bits.
          match = true;
          opcode = kS390_RotLeftAndClearLeft64;
          mask = mb;
        } else if (mb == 63) {
          // Mask extends up to bit 63: rotate, then clear the low bits.
          match = true;
          opcode = kS390_RotLeftAndClearRight64;
          mask = me;
        }
        if (match) {
          Emit(opcode, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
               g.TempImmediate(mask));
          return;
        }
      }
    }
  }
  VisitRRO(this, kS390_ShiftRight64, node, OperandMode::kShift64Imm);
}
#endif

// 32-bit arithmetic shift right. (x << 16) >> 16 and (x << 24) >> 24 are
// strength-reduced to halfword/byte sign-extension instructions.
void InstructionSelector::VisitWord32Sar(Node* node) {
  S390OperandGenerator g(this);
  Int32BinopMatcher m(node);
  // Replace with sign extension for (x << K) >> K where K is 16 or 24.
  if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(16) && m.right().Is(16)) {
      // Skip the extra zero-extend when the source already zero-extends.
      bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
      Emit(kS390_ExtendSignWord16,
           doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
           g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
      return;
    } else if (mleft.right().Is(24) && m.right().Is(24)) {
      bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
      Emit(kS390_ExtendSignWord8,
           doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
           g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
      return;
    }
  }
  VisitBin32op(this, node, kS390_ShiftRightArith32, ShiftOperandMode);
}
1106
1107#if !V8_TARGET_ARCH_S390X
1108void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
1109                    InstructionCode opcode2, Node* node) {
1110  S390OperandGenerator g(selector);
1111
1112  Node* projection1 = NodeProperties::FindProjection(node, 1);
1113  if (projection1) {
1114    // We use UseUniqueRegister here to avoid register sharing with the output
1115    // registers.
1116    InstructionOperand inputs[] = {
1117        g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
1118        g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
1119
1120    InstructionOperand outputs[] = {
1121        g.DefineAsRegister(node),
1122        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
1123
1124    selector->Emit(opcode, 2, outputs, 4, inputs);
1125  } else {
1126    // The high word of the result is not used, so we emit the standard 32 bit
1127    // instruction.
1128    selector->Emit(opcode2, g.DefineSameAsFirst(node),
1129                   g.UseRegister(node->InputAt(0)),
1130                   g.UseRegister(node->InputAt(2)), g.TempImmediate(0));
1131  }
1132}
1133
// 64-bit add on the 32-bit target, via the shared pair-binop routine.
void InstructionSelector::VisitInt32PairAdd(Node* node) {
  VisitPairBinop(this, kS390_AddPair, kS390_Add32, node);
}

// 64-bit subtract on the 32-bit target, via the shared pair-binop routine.
void InstructionSelector::VisitInt32PairSub(Node* node) {
  VisitPairBinop(this, kS390_SubPair, kS390_Sub32, node);
}
1141
1142void InstructionSelector::VisitInt32PairMul(Node* node) {
1143  S390OperandGenerator g(this);
1144  Node* projection1 = NodeProperties::FindProjection(node, 1);
1145  if (projection1) {
1146    InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
1147                                   g.UseUniqueRegister(node->InputAt(1)),
1148                                   g.UseUniqueRegister(node->InputAt(2)),
1149                                   g.UseUniqueRegister(node->InputAt(3))};
1150
1151    InstructionOperand outputs[] = {
1152        g.DefineAsRegister(node),
1153        g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
1154
1155    Emit(kS390_MulPair, 2, outputs, 4, inputs);
1156  } else {
1157    // The high word of the result is not used, so we emit the standard 32 bit
1158    // instruction.
1159    Emit(kS390_Mul32, g.DefineSameAsFirst(node),
1160         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(2)),
1161         g.TempImmediate(0));
1162  }
1163}
1164
namespace {
// Shared routine for multiple shift operations.
// Emits a pair (double-word) shift. The shift amount (input 2) is encoded as
// an immediate when constant. When the high-word projection is unused, a
// temp register absorbs the second result word.
void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
                    Node* node) {
  S390OperandGenerator g(selector);
  // We use g.UseUniqueRegister here to guarantee that there is
  // no register aliasing of input registers with output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  Node* projection1 = NodeProperties::FindProjection(node, 1);

  InstructionOperand outputs[2];
  InstructionOperand temps[1];
  int32_t output_count = 0;
  int32_t temp_count = 0;

  outputs[output_count++] = g.DefineAsRegister(node);
  if (projection1) {
    outputs[output_count++] = g.DefineAsRegister(projection1);
  } else {
    // High word unused: route it to a scratch register.
    temps[temp_count++] = g.TempRegister();
  }

  selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
}  // namespace
1201
// Pair (double-word) shift left on the 32-bit target.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitPairShift(this, kS390_ShiftLeftPair, node);
}

// Pair (double-word) logical shift right on the 32-bit target.
void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitPairShift(this, kS390_ShiftRightPair, node);
}

// Pair (double-word) arithmetic shift right on the 32-bit target.
void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitPairShift(this, kS390_ShiftRightArithPair, node);
}
#endif
1214
#if V8_TARGET_ARCH_S390X
// 64-bit arithmetic shift right.
void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kS390_ShiftRightArith64, node, OperandMode::kShift64Imm);
}
#endif

// 32-bit rotate right.
void InstructionSelector::VisitWord32Ror(Node* node) {
  // TODO(john): match dst = ror(src1, src2 + imm)
  VisitBin32op(this, node, kS390_RotRight32,
               OperandMode::kAllowRI | OperandMode::kAllowRRR |
                   OperandMode::kAllowRRI | OperandMode::kShift32Imm);
}

#if V8_TARGET_ARCH_S390X
// 64-bit rotate right.
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kS390_RotRight64, node, OperandMode::kShift64Imm);
}
#endif

// Count leading zeros, 32-bit.
void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kS390_Cntlz32, node);
}
1237
#if V8_TARGET_ARCH_S390X
// Count leading zeros, 64-bit. Uses the common VisitRR helper (register
// output, register input 0) for consistency with VisitWord32Clz, which
// previously differed only in hand-rolling the identical Emit call.
void InstructionSelector::VisitWord64Clz(Node* node) {
  VisitRR(this, kS390_Cntlz64, node);
}
#endif
1245
// Population count, 32-bit.
void InstructionSelector::VisitWord32Popcnt(Node* node) {
  S390OperandGenerator g(this);
  Node* value = node->InputAt(0);
  Emit(kS390_Popcnt32, g.DefineAsRegister(node), g.UseRegister(value));
}

#if V8_TARGET_ARCH_S390X
// Population count, 64-bit.
void InstructionSelector::VisitWord64Popcnt(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_Popcnt64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
#endif
1259
// Count-trailing-zeros is not selected on this target.
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_S390X
// Count-trailing-zeros is not selected on this target.
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
#endif

// Bit reversal is not selected on this target.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

#if V8_TARGET_ARCH_S390X
// Bit reversal is not selected on this target.
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
1271
// 64-bit byte swap, register-to-register.
void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_LoadReverse64RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

// 32-bit byte swap. When the input is a word32 load this node solely covers,
// the swap is fused into a load-reverse instruction; otherwise a
// register-to-register byte swap is emitted.
void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  S390OperandGenerator g(this);
  NodeMatcher input(node->InputAt(0));
  if (CanCover(node, input.node()) && input.IsLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
    if (load_rep.representation() == MachineRepresentation::kWord32) {
      Node* base = input.node()->InputAt(0);
      Node* offset = input.node()->InputAt(1);
      Emit(kS390_LoadReverse32 | AddressingModeField::encode(kMode_MRR),
           // TODO(john.yan): one of the base and offset can be imm.
           g.DefineAsRegister(node), g.UseRegister(base),
           g.UseRegister(offset));
      return;
    }
  }
  Emit(kS390_LoadReverse32RR, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1296
// 32-bit integer add; operand forms governed by AddOperandMode.
void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBin32op(this, node, kS390_Add32, AddOperandMode);
}

#if V8_TARGET_ARCH_S390X
// 64-bit integer add; the right operand may be a signed 32-bit immediate.
void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
                                OperandMode::kInt32Imm);
}
#endif
1307
// 32-bit integer subtract. 0 - x is strength-reduced to a negate.
void InstructionSelector::VisitInt32Sub(Node* node) {
  S390OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().Is(0)) {
    Node* right = m.right().node();
    // NOTE(review): every other doZeroExt computation in this file negates
    // ZeroExtendsWord32ToWord64 (see VisitWord32Sar, VisitInt32Mul); the
    // missing '!' here looks inverted — confirm intent.
    bool doZeroExt = ZeroExtendsWord32ToWord64(right);
    Emit(kS390_Neg32, g.DefineAsRegister(node), g.UseRegister(right),
         g.TempImmediate(doZeroExt));
  } else {
    VisitBin32op(this, node, kS390_Sub32, SubOperandMode);
  }
}
1320
#if V8_TARGET_ARCH_S390X
// 64-bit integer subtract. 0 - x is strength-reduced to a negate; otherwise
// the right operand may be a negated signed 32-bit immediate.
void InstructionSelector::VisitInt64Sub(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kS390_Neg64, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
                                  OperandMode::kInt32Imm_Negate);
  }
}
#endif
1334
1335namespace {
1336
1337void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1338                  InstructionOperand left, InstructionOperand right,
1339                  FlagsContinuation* cont);
1340
1341#if V8_TARGET_ARCH_S390X
1342void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
1343  S390OperandGenerator g(selector);
1344  Int32BinopMatcher m(node);
1345  Node* left = m.left().node();
1346  Node* right = m.right().node();
1347  if (g.CanBeImmediate(right, OperandMode::kInt32Imm)) {
1348    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
1349                   g.UseImmediate(right));
1350  } else {
1351    if (g.CanBeBetterLeftOperand(right)) {
1352      std::swap(left, right);
1353    }
1354    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
1355                   g.Use(right));
1356  }
1357}
1358#endif
1359
1360}  // namespace
1361
// 32-bit multiply with overflow check. When the overflow projection is used,
// selects the overflow-checking multiply with a not-equal continuation;
// otherwise degrades to a plain multiply.
void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
    return VisitBin32op(this, node, kS390_Mul32WithOverflow,
                        OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
                        &cont);
  }
  VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
}

// 32-bit multiply. A power-of-two immediate right operand is strength-
// reduced to a shift left.
void InstructionSelector::VisitInt32Mul(Node* node) {
  S390OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
      base::bits::IsPowerOfTwo32(g.GetImmediate(right))) {
    // log2 of the immediate gives the shift amount.
    int power = 31 - base::bits::CountLeadingZeros32(g.GetImmediate(right));
    bool doZeroExt = !ZeroExtendsWord32ToWord64(left);
    InstructionOperand dst =
        (doZeroExt && CpuFeatures::IsSupported(DISTINCT_OPS))
            ? g.DefineAsRegister(node)
            : g.DefineSameAsFirst(node);

    Emit(kS390_ShiftLeft32, dst, g.UseRegister(left), g.UseImmediate(power),
         g.TempImmediate(doZeroExt));
    return;
  }
  VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
}

#if V8_TARGET_ARCH_S390X
// 64-bit multiply. A power-of-two immediate right operand is strength-
// reduced to a shift left; otherwise the shared VisitMul routine is used.
void InstructionSelector::VisitInt64Mul(Node* node) {
  S390OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Node* left = m.left().node();
  Node* right = m.right().node();
  if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
      base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
    int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
    Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
         g.UseImmediate(power));
    return;
  }
  VisitMul(this, node, kS390_Mul64);
}
#endif
1409
// High word of a signed 32x32 multiply.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitBin32op(this, node, kS390_MulHigh32,
               OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps);
}

// High word of an unsigned 32x32 multiply.
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitBin32op(this, node, kS390_MulHighU32,
               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}

// Signed 32-bit division.
void InstructionSelector::VisitInt32Div(Node* node) {
  VisitBin32op(this, node, kS390_Div32,
               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}

#if V8_TARGET_ARCH_S390X
// Signed 64-bit division.
void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kS390_Div64, node);
}
#endif

// Unsigned 32-bit division.
void InstructionSelector::VisitUint32Div(Node* node) {
  VisitBin32op(this, node, kS390_DivU32,
               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}

#if V8_TARGET_ARCH_S390X
// Unsigned 64-bit division.
void InstructionSelector::VisitUint64Div(Node* node) {
  VisitRRR(this, kS390_DivU64, node);
}
#endif

// Signed 32-bit remainder.
void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitBin32op(this, node, kS390_Mod32,
               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}

#if V8_TARGET_ARCH_S390X
// Signed 64-bit remainder.
void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kS390_Mod64, node);
}
#endif

// Unsigned 32-bit remainder.
void InstructionSelector::VisitUint32Mod(Node* node) {
  VisitBin32op(this, node, kS390_ModU32,
               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
}

#if V8_TARGET_ARCH_S390X
// Unsigned 64-bit remainder.
void InstructionSelector::VisitUint64Mod(Node* node) {
  VisitRRR(this, kS390_ModU64, node);
}
#endif
1463
// float32 -> float64 widening.
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kS390_Float32ToDouble, node);
}

// int32 -> float32 conversion.
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kS390_Int32ToFloat32, node);
}

// uint32 -> float32 conversion.
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kS390_Uint32ToFloat32, node);
}

// int32 -> float64 conversion.
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kS390_Int32ToDouble, node);
}

// uint32 -> float64 conversion.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kS390_Uint32ToDouble, node);
}

// float64 -> int32 conversion.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  VisitRR(this, kS390_DoubleToInt32, node);
}

// float64 -> uint32 conversion.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kS390_DoubleToUint32, node);
}

// float64 -> uint32 truncation (same opcode as the change above).
void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kS390_DoubleToUint32, node);
}
1495
#if V8_TARGET_ARCH_S390X
// Checked float32 -> int64 truncation (success flag in projection 1).
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kS390_Float32ToInt64, node);
}

// Checked float64 -> int64 truncation.
void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  VisitTryTruncateDouble(this, kS390_DoubleToInt64, node);
}

// Checked float32 -> uint64 truncation.
void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kS390_Float32ToUint64, node);
}

// Checked float64 -> uint64 truncation.
void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  VisitTryTruncateDouble(this, kS390_DoubleToUint64, node);
}

// int32 -> int64 sign extension.
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kS390_ExtendSignWord32, node);
}

// uint32 -> uint64 zero extension; elided entirely when the producing
// operation is already known to zero-extend its 32-bit result.
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  S390OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (ZeroExtendsWord32ToWord64(value)) {
    // The producing 32-bit operation already zero-extends into the full
    // 64-bit register, so the explicit zero-extension is a no-op.
    return EmitIdentity(node);
  }
  VisitRR(this, kS390_Uint32ToUint64, node);
}
#endif
1529
// float64 -> float32 narrowing.
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRR(this, kS390_DoubleToFloat32, node);
}

// float64 -> word32 JS truncation (architecture-independent opcode).
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

// float64 -> int32 rounding conversion.
void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kS390_DoubleToInt32, node);
}

// float32 -> int32 truncation.
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kS390_Float32ToInt32, node);
}

// float32 -> uint32 truncation.
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kS390_Float32ToUint32, node);
}
1549
#if V8_TARGET_ARCH_S390X
// int64 -> int32 truncation.
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  // TODO(mbrandy): inspect input to see if nop is appropriate.
  VisitRR(this, kS390_Int64ToInt32, node);
}

// int64 -> float32 conversion.
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kS390_Int64ToFloat32, node);
}

// int64 -> float64 conversion.
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kS390_Int64ToDouble, node);
}

// uint64 -> float32 conversion.
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kS390_Uint64ToFloat32, node);
}

// uint64 -> float64 conversion.
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kS390_Uint64ToDouble, node);
}
#endif
1572
// Reinterpret float32 bits as int32.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kS390_BitcastFloat32ToInt32, node);
}

#if V8_TARGET_ARCH_S390X
// Reinterpret float64 bits as int64.
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kS390_BitcastDoubleToInt64, node);
}
#endif

// Reinterpret int32 bits as float32.
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  VisitRR(this, kS390_BitcastInt32ToFloat32, node);
}

#if V8_TARGET_ARCH_S390X
// Reinterpret int64 bits as float64.
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kS390_BitcastInt64ToDouble, node);
}
#endif
1592
// float32 addition.
void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kS390_AddFloat, node);
}

// float64 addition.
void InstructionSelector::VisitFloat64Add(Node* node) {
  // TODO(mbrandy): detect multiply-add
  VisitRRR(this, kS390_AddDouble, node);
}

// float32 subtraction.
void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kS390_SubFloat, node);
}

// float64 subtraction.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  // TODO(mbrandy): detect multiply-subtract
  VisitRRR(this, kS390_SubDouble, node);
}

// float32 multiplication.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kS390_MulFloat, node);
}

// float64 multiplication.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  // TODO(mbrandy): detect negate
  VisitRRR(this, kS390_MulDouble, node);
}

// float32 division.
void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kS390_DivFloat, node);
}

// float64 division.
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kS390_DivDouble, node);
}

// float64 modulus: lowered to a runtime call with fixed d1/d2 argument
// registers (hence MarkAsCall).
void InstructionSelector::VisitFloat64Mod(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_ModDouble, g.DefineAsFixed(node, d1),
       g.UseFixed(node->InputAt(0), d1), g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}
1634
// float32 maximum.
void InstructionSelector::VisitFloat32Max(Node* node) {
  VisitRRR(this, kS390_MaxFloat, node);
}

// float64 maximum.
void InstructionSelector::VisitFloat64Max(Node* node) {
  VisitRRR(this, kS390_MaxDouble, node);
}

// Quiet a signalling NaN.
void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kS390_Float64SilenceNaN, node);
}

// float32 minimum.
void InstructionSelector::VisitFloat32Min(Node* node) {
  VisitRRR(this, kS390_MinFloat, node);
}

// float64 minimum.
void InstructionSelector::VisitFloat64Min(Node* node) {
  VisitRRR(this, kS390_MinDouble, node);
}

// float32 absolute value.
void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kS390_AbsFloat, node);
}

// float64 absolute value.
void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kS390_AbsDouble, node);
}

// float32 square root.
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kS390_SqrtFloat, node);
}
1666
// Unary IEEE-754 math function (sin, cos, ...): lowered to a call with the
// argument and result fixed in d1.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  S390OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
      ->MarkAsCall();
}

// Binary IEEE-754 math function (pow, atan2, ...): lowered to a call with
// arguments fixed in d1/d2 and the result in d1.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  S390OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1),
       g.UseFixed(node->InputAt(1), d2))
      ->MarkAsCall();
}

// float64 square root.
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kS390_SqrtDouble, node);
}
1685
// Round toward negative infinity, float32.
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kS390_FloorFloat, node);
}

// Round toward negative infinity, float64.
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kS390_FloorDouble, node);
}

// Round toward positive infinity, float32.
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kS390_CeilFloat, node);
}

// Round toward positive infinity, float64.
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kS390_CeilDouble, node);
}

// Round toward zero, float32.
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kS390_TruncateFloat, node);
}

// Round toward zero, float64.
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kS390_TruncateDouble, node);
}

// Round to nearest, ties away from zero, float64.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  VisitRR(this, kS390_RoundDouble, node);
}

// Round-ties-even is not selected on this target.
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  UNREACHABLE();
}

// Round-ties-even is not selected on this target.
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  UNREACHABLE();
}

// float32 negation.
void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kS390_NegFloat, node);
}

// float64 negation.
void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kS390_NegDouble, node);
}
1729
// 32-bit add with overflow check: sets the overflow flag continuation when
// the overflow projection is used.
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  OperandModes mode = AddOperandMode;
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBin32op(this, node, kS390_Add32, mode, &cont);
  }
  // Overflow output unused: emit with an empty continuation.
  FlagsContinuation cont;
  VisitBin32op(this, node, kS390_Add32, mode, &cont);
}

// 32-bit subtract with overflow check; mirrors VisitInt32AddWithOverflow.
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  OperandModes mode = SubOperandMode;
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBin32op(this, node, kS390_Sub32, mode, &cont);
  }
  FlagsContinuation cont;
  VisitBin32op(this, node, kS390_Sub32, mode, &cont);
}
1749
1750#if V8_TARGET_ARCH_S390X
1751void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
1752  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1753    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1754    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
1755                                         OperandMode::kInt32Imm, &cont);
1756  }
1757  FlagsContinuation cont;
1758  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, OperandMode::kInt32Imm,
1759                                &cont);
1760}
1761
1762void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
1763  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1764    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1765    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
1766                                         OperandMode::kInt32Imm_Negate, &cont);
1767  }
1768  FlagsContinuation cont;
1769  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
1770                                OperandMode::kInt32Imm_Negate, &cont);
1771}
1772#endif
1773
1774static bool CompareLogical(FlagsContinuation* cont) {
1775  switch (cont->condition()) {
1776    case kUnsignedLessThan:
1777    case kUnsignedGreaterThanOrEqual:
1778    case kUnsignedLessThanOrEqual:
1779    case kUnsignedGreaterThan:
1780      return true;
1781    default:
1782      return false;
1783  }
1784  UNREACHABLE();
1785  return false;
1786}
1787
1788namespace {
1789
1790// Shared routine for multiple compare operations.
// Shared routine for multiple compare operations.
// Emits |opcode| over |left|/|right| and materializes the continuation as a
// branch, a deoptimization check, a boolean set-to-register, or a trap.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                  InstructionOperand left, InstructionOperand right,
                  FlagsContinuation* cont) {
  S390OperandGenerator g(selector);
  // Fold the continuation's flags mode and condition into the opcode.
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
                             cont->reason(), cont->frame_state());
  } else if (cont->IsSet()) {
    // Materialize the comparison result into cont->result().
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  } else {
    DCHECK(cont->IsTrap());
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.UseImmediate(cont->trap_id()));
  }
}
1810
1811void VisitWordCompareZero(InstructionSelector* selector, Node* user,
1812                          Node* value, InstructionCode opcode,
1813                          FlagsContinuation* cont);
1814
1815void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
1816                      Node* node, Node* value, FlagsContinuation* cont,
1817                      bool discard_output = false);
1818
1819// Shared routine for multiple word compare operations.
// Shared routine for multiple word compare operations.
// Selects the best operand forms (register, immediate, or memory) for the
// compare, canonicalizes operand order, folds compare-with-zero into a
// load-and-test, and finally emits the instruction with the continuation.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      OperandModes immediate_mode) {
  S390OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  DCHECK(IrOpcode::IsComparisonOpcode(node->opcode()) ||
         node->opcode() == IrOpcode::kInt32Sub ||
         node->opcode() == IrOpcode::kInt64Sub);

  InstructionOperand inputs[8];
  InstructionOperand outputs[1];
  size_t input_count = 0;
  size_t output_count = 0;

  // If one of the two inputs is an immediate, make sure it's on the right, or
  // if one of the two inputs is a memory operand, make sure it's on the left.
  int effect_level = selector->GetEffectLevel(node);
  if (cont->IsBranch()) {
    // For a branch, memory-operand safety is judged at the branch's position,
    // not at the compare node itself.
    effect_level = selector->GetEffectLevel(
        cont->true_block()->PredecessorAt(0)->control_input());
  }

  if ((!g.CanBeImmediate(right, immediate_mode) &&
       g.CanBeImmediate(left, immediate_mode)) ||
      (!g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
       g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
    // Swapping a non-commutative comparison requires mirroring the condition.
    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
    std::swap(left, right);
  }

  // check if compare with 0
  if (g.CanBeImmediate(right, immediate_mode) && g.GetImmediate(right) == 0) {
    DCHECK(opcode == kS390_Cmp32 || opcode == kS390_Cmp64);
    ArchOpcode load_and_test = (opcode == kS390_Cmp32)
                                   ? kS390_LoadAndTestWord32
                                   : kS390_LoadAndTestWord64;
    return VisitLoadAndTest(selector, load_and_test, node, left, cont, true);
  }

  inputs[input_count++] = g.UseRegister(left);
  if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
    // generate memory operand
    AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
        right, inputs, &input_count, OpcodeImmMode(opcode));
    opcode |= AddressingModeField::encode(addressing_mode);
  } else if (g.CanBeImmediate(right, immediate_mode)) {
    inputs[input_count++] = g.UseImmediate(right);
  } else {
    inputs[input_count++] = g.UseAnyExceptImmediate(right);
  }

  // Append continuation-specific operands/outputs.
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  } else if (cont->IsSet()) {
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  } else if (cont->IsTrap()) {
    inputs[input_count++] = g.UseImmediate(cont->trap_id());
  } else {
    DCHECK(cont->IsDeoptimize());
    // nothing to do
  }

  DCHECK(input_count <= 8 && output_count <= 1);
  if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
                             cont->kind(), cont->reason(), cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
1894
1895void VisitWord32Compare(InstructionSelector* selector, Node* node,
1896                        FlagsContinuation* cont) {
1897  OperandModes mode =
1898      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
1899  VisitWordCompare(selector, node, kS390_Cmp32, cont, mode);
1900}
1901
1902#if V8_TARGET_ARCH_S390X
1903void VisitWord64Compare(InstructionSelector* selector, Node* node,
1904                        FlagsContinuation* cont) {
1905  OperandModes mode =
1906      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
1907  VisitWordCompare(selector, node, kS390_Cmp64, cont, mode);
1908}
1909#endif
1910
1911// Shared routine for multiple float32 compare operations.
1912void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1913                         FlagsContinuation* cont) {
1914  VisitWordCompare(selector, node, kS390_CmpFloat, cont, OperandMode::kNone);
1915}
1916
1917// Shared routine for multiple float64 compare operations.
1918void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1919                         FlagsContinuation* cont) {
1920  VisitWordCompare(selector, node, kS390_CmpDouble, cont, OperandMode::kNone);
1921}
1922
1923void VisitTestUnderMask(InstructionSelector* selector, Node* node,
1924                        FlagsContinuation* cont) {
1925  DCHECK(node->opcode() == IrOpcode::kWord32And ||
1926         node->opcode() == IrOpcode::kWord64And);
1927  ArchOpcode opcode =
1928      (node->opcode() == IrOpcode::kWord32And) ? kS390_Tst32 : kS390_Tst64;
1929  S390OperandGenerator g(selector);
1930  Node* left = node->InputAt(0);
1931  Node* right = node->InputAt(1);
1932  if (!g.CanBeImmediate(right, OperandMode::kUint32Imm) &&
1933      g.CanBeImmediate(left, OperandMode::kUint32Imm)) {
1934    std::swap(left, right);
1935  }
1936  VisitCompare(selector, opcode, g.UseRegister(left),
1937               g.UseOperand(right, OperandMode::kUint32Imm), cont);
1938}
1939
1940void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
1941                      Node* node, Node* value, FlagsContinuation* cont,
1942                      bool discard_output) {
1943  static_assert(kS390_LoadAndTestFloat64 - kS390_LoadAndTestWord32 == 3,
1944                "LoadAndTest Opcode shouldn't contain other opcodes.");
1945
1946  // TODO(john.yan): Add support for Float32/Float64.
1947  DCHECK(opcode >= kS390_LoadAndTestWord32 ||
1948         opcode <= kS390_LoadAndTestWord64);
1949
1950  S390OperandGenerator g(selector);
1951  InstructionOperand inputs[8];
1952  InstructionOperand outputs[2];
1953  size_t input_count = 0;
1954  size_t output_count = 0;
1955  bool use_value = false;
1956
1957  int effect_level = selector->GetEffectLevel(node);
1958  if (cont->IsBranch()) {
1959    effect_level = selector->GetEffectLevel(
1960        cont->true_block()->PredecessorAt(0)->control_input());
1961  }
1962
1963  if (g.CanBeMemoryOperand(opcode, node, value, effect_level)) {
1964    // generate memory operand
1965    AddressingMode addressing_mode =
1966        g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
1967    opcode |= AddressingModeField::encode(addressing_mode);
1968  } else {
1969    inputs[input_count++] = g.UseAnyExceptImmediate(value);
1970    use_value = true;
1971  }
1972
1973  if (!discard_output && !use_value) {
1974    outputs[output_count++] = g.DefineAsRegister(value);
1975  }
1976
1977  opcode = cont->Encode(opcode);
1978  if (cont->IsBranch()) {
1979    inputs[input_count++] = g.Label(cont->true_block());
1980    inputs[input_count++] = g.Label(cont->false_block());
1981  } else if (cont->IsSet()) {
1982    outputs[output_count++] = g.DefineAsRegister(cont->result());
1983  } else if (cont->IsTrap()) {
1984    inputs[input_count++] = g.UseImmediate(cont->trap_id());
1985  } else {
1986    DCHECK(cont->IsDeoptimize());
1987    // nothing to do
1988  }
1989
1990  DCHECK(input_count <= 8 && output_count <= 2);
1991  opcode = cont->Encode(opcode);
1992  if (cont->IsDeoptimize()) {
1993    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
1994                             cont->kind(), cont->reason(), cont->frame_state());
1995  } else {
1996    selector->Emit(opcode, output_count, outputs, input_count, inputs);
1997  }
1998}
1999
2000// Shared routine for word comparisons against zero.
2001void VisitWordCompareZero(InstructionSelector* selector, Node* user,
2002                          Node* value, InstructionCode opcode,
2003                          FlagsContinuation* cont) {
2004  // Try to combine with comparisons against 0 by simply inverting the branch.
2005  while (value->opcode() == IrOpcode::kWord32Equal &&
2006         selector->CanCover(user, value)) {
2007    Int32BinopMatcher m(value);
2008    if (!m.right().Is(0)) break;
2009
2010    user = value;
2011    value = m.left().node();
2012    cont->Negate();
2013  }
2014
2015  FlagsCondition fc = cont->condition();
2016  if (selector->CanCover(user, value)) {
2017    switch (value->opcode()) {
2018      case IrOpcode::kWord32Equal: {
2019        cont->OverwriteAndNegateIfEqual(kEqual);
2020        Int32BinopMatcher m(value);
2021        if (m.right().Is(0)) {
2022          // Try to combine the branch with a comparison.
2023          Node* const user = m.node();
2024          Node* const value = m.left().node();
2025          if (selector->CanCover(user, value)) {
2026            switch (value->opcode()) {
2027              case IrOpcode::kInt32Sub:
2028                return VisitWord32Compare(selector, value, cont);
2029              case IrOpcode::kWord32And:
2030                return VisitTestUnderMask(selector, value, cont);
2031              default:
2032                break;
2033            }
2034          }
2035        }
2036        return VisitWord32Compare(selector, value, cont);
2037      }
2038      case IrOpcode::kInt32LessThan:
2039        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
2040        return VisitWord32Compare(selector, value, cont);
2041      case IrOpcode::kInt32LessThanOrEqual:
2042        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
2043        return VisitWord32Compare(selector, value, cont);
2044      case IrOpcode::kUint32LessThan:
2045        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2046        return VisitWord32Compare(selector, value, cont);
2047      case IrOpcode::kUint32LessThanOrEqual:
2048        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2049        return VisitWord32Compare(selector, value, cont);
2050#if V8_TARGET_ARCH_S390X
2051      case IrOpcode::kWord64Equal: {
2052        cont->OverwriteAndNegateIfEqual(kEqual);
2053        Int64BinopMatcher m(value);
2054        if (m.right().Is(0)) {
2055          // Try to combine the branch with a comparison.
2056          Node* const user = m.node();
2057          Node* const value = m.left().node();
2058          if (selector->CanCover(user, value)) {
2059            switch (value->opcode()) {
2060              case IrOpcode::kInt64Sub:
2061                return VisitWord64Compare(selector, value, cont);
2062              case IrOpcode::kWord64And:
2063                return VisitTestUnderMask(selector, value, cont);
2064              default:
2065                break;
2066            }
2067          }
2068        }
2069        return VisitWord64Compare(selector, value, cont);
2070      }
2071      case IrOpcode::kInt64LessThan:
2072        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
2073        return VisitWord64Compare(selector, value, cont);
2074      case IrOpcode::kInt64LessThanOrEqual:
2075        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
2076        return VisitWord64Compare(selector, value, cont);
2077      case IrOpcode::kUint64LessThan:
2078        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2079        return VisitWord64Compare(selector, value, cont);
2080      case IrOpcode::kUint64LessThanOrEqual:
2081        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2082        return VisitWord64Compare(selector, value, cont);
2083#endif
2084      case IrOpcode::kFloat32Equal:
2085        cont->OverwriteAndNegateIfEqual(kEqual);
2086        return VisitFloat32Compare(selector, value, cont);
2087      case IrOpcode::kFloat32LessThan:
2088        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2089        return VisitFloat32Compare(selector, value, cont);
2090      case IrOpcode::kFloat32LessThanOrEqual:
2091        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2092        return VisitFloat32Compare(selector, value, cont);
2093      case IrOpcode::kFloat64Equal:
2094        cont->OverwriteAndNegateIfEqual(kEqual);
2095        return VisitFloat64Compare(selector, value, cont);
2096      case IrOpcode::kFloat64LessThan:
2097        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
2098        return VisitFloat64Compare(selector, value, cont);
2099      case IrOpcode::kFloat64LessThanOrEqual:
2100        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
2101        return VisitFloat64Compare(selector, value, cont);
2102      case IrOpcode::kProjection:
2103        // Check if this is the overflow output projection of an
2104        // <Operation>WithOverflow node.
2105        if (ProjectionIndexOf(value->op()) == 1u) {
2106          // We cannot combine the <Operation>WithOverflow with this branch
2107          // unless the 0th projection (the use of the actual value of the
2108          // <Operation> is either nullptr, which means there's no use of the
2109          // actual value, or was already defined, which means it is scheduled
2110          // *AFTER* this branch).
2111          Node* const node = value->InputAt(0);
2112          Node* const result = NodeProperties::FindProjection(node, 0);
2113          if (result == nullptr || selector->IsDefined(result)) {
2114            switch (node->opcode()) {
2115              case IrOpcode::kInt32AddWithOverflow:
2116                cont->OverwriteAndNegateIfEqual(kOverflow);
2117                return VisitBin32op(selector, node, kS390_Add32, AddOperandMode,
2118                                    cont);
2119              case IrOpcode::kInt32SubWithOverflow:
2120                cont->OverwriteAndNegateIfEqual(kOverflow);
2121                return VisitBin32op(selector, node, kS390_Sub32, SubOperandMode,
2122                                    cont);
2123              case IrOpcode::kInt32MulWithOverflow:
2124                cont->OverwriteAndNegateIfEqual(kNotEqual);
2125                return VisitBin32op(
2126                    selector, node, kS390_Mul32WithOverflow,
2127                    OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
2128                    cont);
2129#if V8_TARGET_ARCH_S390X
2130              case IrOpcode::kInt64AddWithOverflow:
2131                cont->OverwriteAndNegateIfEqual(kOverflow);
2132                return VisitBinop<Int64BinopMatcher>(
2133                    selector, node, kS390_Add64, OperandMode::kInt32Imm, cont);
2134              case IrOpcode::kInt64SubWithOverflow:
2135                cont->OverwriteAndNegateIfEqual(kOverflow);
2136                return VisitBinop<Int64BinopMatcher>(
2137                    selector, node, kS390_Sub64, OperandMode::kInt32Imm_Negate,
2138                    cont);
2139#endif
2140              default:
2141                break;
2142            }
2143          }
2144        }
2145        break;
2146      case IrOpcode::kInt32Sub:
2147        if (fc == kNotEqual || fc == kEqual)
2148          return VisitWord32Compare(selector, value, cont);
2149        break;
2150      case IrOpcode::kWord32And:
2151        return VisitTestUnderMask(selector, value, cont);
2152      case IrOpcode::kLoad: {
2153        LoadRepresentation load_rep = LoadRepresentationOf(value->op());
2154        switch (load_rep.representation()) {
2155          case MachineRepresentation::kWord32:
2156            if (opcode == kS390_LoadAndTestWord32) {
2157              return VisitLoadAndTest(selector, opcode, user, value, cont);
2158            }
2159          default:
2160            break;
2161        }
2162        break;
2163      }
2164      case IrOpcode::kInt32Add:
2165        // can't handle overflow case.
2166        break;
2167      case IrOpcode::kWord32Or:
2168        return VisitBin32op(selector, value, kS390_Or32, OrOperandMode, cont);
2169      case IrOpcode::kWord32Xor:
2170        return VisitBin32op(selector, value, kS390_Xor32, XorOperandMode, cont);
2171      case IrOpcode::kWord32Sar:
2172      case IrOpcode::kWord32Shl:
2173      case IrOpcode::kWord32Shr:
2174      case IrOpcode::kWord32Ror:
2175        // doesn't generate cc, so ignore.
2176        break;
2177#if V8_TARGET_ARCH_S390X
2178      case IrOpcode::kInt64Sub:
2179        if (fc == kNotEqual || fc == kEqual)
2180          return VisitWord64Compare(selector, value, cont);
2181        break;
2182      case IrOpcode::kWord64And:
2183        return VisitTestUnderMask(selector, value, cont);
2184      case IrOpcode::kInt64Add:
2185        // can't handle overflow case.
2186        break;
2187      case IrOpcode::kWord64Or:
2188        // TODO(john.yan): need to handle
2189        break;
2190      case IrOpcode::kWord64Xor:
2191        // TODO(john.yan): need to handle
2192        break;
2193      case IrOpcode::kWord64Sar:
2194      case IrOpcode::kWord64Shl:
2195      case IrOpcode::kWord64Shr:
2196      case IrOpcode::kWord64Ror:
2197        // doesn't generate cc, so ignore
2198        break;
2199#endif
2200      default:
2201        break;
2202    }
2203  }
2204
2205  // Branch could not be combined with a compare, emit LoadAndTest
2206  VisitLoadAndTest(selector, opcode, user, value, cont, true);
2207}
2208
// Compares a 32-bit value against zero, combining with producers when able.
void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
                            Node* value, FlagsContinuation* cont) {
  VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord32, cont);
}
2213
2214#if V8_TARGET_ARCH_S390X
// Compares a 64-bit value against zero, combining with producers when able.
void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
                            Node* value, FlagsContinuation* cont) {
  VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord64, cont);
}
2219#endif
2220
2221}  // namespace
2222
// Lowers a Branch node: branch to |tbranch| when the condition input is
// non-zero (kNotEqual against zero), else to |fbranch|.
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWord32CompareZero(this, branch, branch->InputAt(0), &cont);
}
2228
// Deoptimizes when the condition input is non-zero; input 1 is the
// frame state attached to the deopt.
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
2235
// Deoptimizes when the condition input is zero; input 1 is the frame state.
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
  FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
      kEqual, p.kind(), p.reason(), node->InputAt(1));
  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
2242
// Traps (via runtime function |func_id|) when the condition input is
// non-zero.
void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
  FlagsContinuation cont =
      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
2248
// Traps (via runtime function |func_id|) when the condition input is zero.
void InstructionSelector::VisitTrapUnless(Node* node,
                                          Runtime::FunctionId func_id) {
  FlagsContinuation cont =
      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
2255
// Lowers a Switch node, choosing between a jump table and a chain of
// conditional jumps based on a simple space/time cost model.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  S390OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  // Prefer the table when its weighted cost wins; the min_value guard avoids
  // overflow when biasing the index below.
  if (sw.case_count > 0 &&
      table_space_cost + 3 * table_time_cost <=
          lookup_space_cost + 3 * lookup_time_cost &&
      sw.min_value > std::numeric_limits<int32_t>::min()) {
    InstructionOperand index_operand = value_operand;
    if (sw.min_value) {
      // Bias the value so the table is indexed from zero.
      index_operand = g.TempRegister();
      Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
           value_operand, g.TempImmediate(-sw.min_value));
    }
#if V8_TARGET_ARCH_S390X
    // Zero-extend the 32-bit index before using it as a 64-bit table offset.
    InstructionOperand index_operand_zero_ext = g.TempRegister();
    Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
    index_operand = index_operand_zero_ext;
#endif
    // Generate a table lookup.
    return EmitTableSwitch(sw, index_operand);
  }

  // Generate a sequence of conditional jumps.
  return EmitLookupSwitch(sw, value_operand);
}
2287
2288void InstructionSelector::VisitWord32Equal(Node* const node) {
2289  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2290  Int32BinopMatcher m(node);
2291  if (m.right().Is(0)) {
2292    return VisitWord32CompareZero(this, m.node(), m.left().node(), &cont);
2293  }
2294  VisitWord32Compare(this, node, &cont);
2295}
2296
// Int32LessThan -> signed 32-bit compare with set continuation.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
2301
// Int32LessThanOrEqual -> signed 32-bit compare with set continuation.
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
2307
// Uint32LessThan -> unsigned 32-bit compare with set continuation.
void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
2312
// Uint32LessThanOrEqual -> unsigned 32-bit compare with set continuation.
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
2318
2319#if V8_TARGET_ARCH_S390X
2320void InstructionSelector::VisitWord64Equal(Node* const node) {
2321  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2322  Int64BinopMatcher m(node);
2323  if (m.right().Is(0)) {
2324    return VisitWord64CompareZero(this, m.node(), m.left().node(), &cont);
2325  }
2326  VisitWord64Compare(this, node, &cont);
2327}
2328
// Int64LessThan -> signed 64-bit compare with set continuation.
void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}
2333
// Int64LessThanOrEqual -> signed 64-bit compare with set continuation.
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
2339
// Uint64LessThan -> unsigned 64-bit compare with set continuation.
void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}
2344
// Uint64LessThanOrEqual -> unsigned 64-bit compare with set continuation.
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
2350#endif
2351
// Float32Equal -> float compare with set continuation.
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
2356
// Float32LessThan uses the unsigned condition, matching the other float
// comparison visitors in this file.
void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}
2361
// Float32LessThanOrEqual uses the unsigned condition, matching the other
// float comparison visitors in this file.
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
2367
// Float64Equal -> double compare with set continuation.
void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
2372
// Float64LessThan uses the unsigned condition, matching the other float
// comparison visitors in this file.
void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}
2377
// Float64LessThanOrEqual uses the unsigned condition, matching the other
// float comparison visitors in this file.
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
2383
// Emits the stack setup for a call's arguments: for C calls, pokes each
// argument into a fixed extra-parameter slot; for other calls, pushes a new
// frame and stores the remaining arguments into their slots.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
    Node* node) {
  S390OperandGenerator g(this);

  // Prepare for C function call.
  if (descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction |
             MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kStackFrameExtraParamSlot;
    for (PushParameter input : (*arguments)) {
      Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
           g.TempImmediate(slot));
      ++slot;
    }
  } else {
    // Push any stack arguments.
    int num_slots = static_cast<int>(descriptor->StackParameterCount());
    int slot = 0;
    for (PushParameter input : (*arguments)) {
      if (slot == 0) {
        // The first argument also allocates the whole frame (num_slots deep).
        DCHECK(input.node());
        Emit(kS390_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
             g.TempImmediate(num_slots));
      } else {
        // Skip any alignment holes in pushed nodes.
        if (input.node()) {
          Emit(kS390_StoreToStackSlot, g.NoOutput(),
               g.UseRegister(input.node()), g.TempImmediate(slot));
        }
      }
      ++slot;
    }
  }
}
2422
// Tail-call target addresses cannot be encoded as immediates on this target.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
2424
// Three temporary registers are reserved for tail calls from a JSFunction.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
2426
// Extracts the low 32 bits of a float64 into a general register.
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_DoubleExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
2432
// Extracts the high 32 bits of a float64 into a general register.
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  S390OperandGenerator g(this);
  Emit(kS390_DoubleExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
2438
// Replaces the low 32 bits of a float64. When the input is itself an
// InsertHighWord32 covered by this node, both halves are known and the
// double can be constructed directly from the two word values.
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  S390OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
      CanCover(node, left)) {
    // left becomes the high word; right is the low word.
    left = left->InputAt(1);
    Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
         g.UseRegister(right));
    return;
  }
  Emit(kS390_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}
2453
// Replaces the high 32 bits of a float64. When the input is itself an
// InsertLowWord32 covered by this node, both halves are known and the
// double can be constructed directly from the two word values.
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  S390OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
      CanCover(node, left)) {
    // right is the high word; left (the inner insert's input) the low word.
    left = left->InputAt(1);
    Emit(kS390_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
         g.UseRegister(left));
    return;
  }
  Emit(kS390_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}
2468
// Lowers an atomic load; only 8/16/32-bit integer representations are
// supported here.
void InstructionSelector::VisitAtomicLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  S390OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }
  // Address is always base + index (register-register mode).
  Emit(opcode | AddressingModeField::encode(kMode_MRR),
       g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
}
2492
// Lowers an atomic store; only 8/16/32-bit representations are supported
// here. The instruction has no outputs.
void InstructionSelector::VisitAtomicStore(Node* node) {
  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
  S390OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);
  ArchOpcode opcode = kArchNop;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Only three of the four input slots are used (value, base, index).
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(value);
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
       inputs);
}
2523
2524// static
2525MachineOperatorBuilder::Flags
2526InstructionSelector::SupportedMachineOperatorFlags() {
2527  return MachineOperatorBuilder::kFloat32RoundDown |
2528         MachineOperatorBuilder::kFloat64RoundDown |
2529         MachineOperatorBuilder::kFloat32RoundUp |
2530         MachineOperatorBuilder::kFloat64RoundUp |
2531         MachineOperatorBuilder::kFloat32RoundTruncate |
2532         MachineOperatorBuilder::kFloat64RoundTruncate |
2533         MachineOperatorBuilder::kFloat64RoundTiesAway |
2534         MachineOperatorBuilder::kWord32Popcnt |
2535         MachineOperatorBuilder::kWord32ReverseBytes |
2536         MachineOperatorBuilder::kWord64ReverseBytes |
2537         MachineOperatorBuilder::kWord64Popcnt;
2538}
2539
2540// static
2541MachineOperatorBuilder::AlignmentRequirements
2542InstructionSelector::AlignmentRequirements() {
2543  return MachineOperatorBuilder::AlignmentRequirements::
2544      FullUnalignedAccessSupport();
2545}
2546
2547}  // namespace compiler
2548}  // namespace internal
2549}  // namespace v8
2550