1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/base/adapters.h"
6#include "src/base/bits.h"
7#include "src/compiler/instruction-selector-impl.h"
8#include "src/compiler/node-matchers.h"
9#include "src/compiler/node-properties.h"
10
11namespace v8 {
12namespace internal {
13namespace compiler {
14
// Logs that instruction selection hit an unimplemented path (function + line).
#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

// General-purpose instruction-selection trace helper for debugging.
#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
19
20
21// Adds Mips-specific methods for generating InstructionOperands.
22class MipsOperandGenerator final : public OperandGenerator {
23 public:
24  explicit MipsOperandGenerator(InstructionSelector* selector)
25      : OperandGenerator(selector) {}
26
27  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
28    if (CanBeImmediate(node, opcode)) {
29      return UseImmediate(node);
30    }
31    return UseRegister(node);
32  }
33
34  bool CanBeImmediate(Node* node, InstructionCode opcode) {
35    Int32Matcher m(node);
36    if (!m.HasValue()) return false;
37    int32_t value = m.Value();
38    switch (ArchOpcodeField::decode(opcode)) {
39      case kMipsShl:
40      case kMipsSar:
41      case kMipsShr:
42        return is_uint5(value);
43      case kMipsXor:
44        return is_uint16(value);
45      case kMipsLdc1:
46      case kMipsSdc1:
47      case kCheckedLoadFloat64:
48      case kCheckedStoreFloat64:
49        return std::numeric_limits<int16_t>::min() <= (value + kIntSize) &&
50               std::numeric_limits<int16_t>::max() >= (value + kIntSize);
51      default:
52        return is_int16(value);
53    }
54  }
55
56 private:
57  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
58    TRACE_UNIMPL();
59    return false;
60  }
61};
62
63
64static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
65                     Node* node) {
66  MipsOperandGenerator g(selector);
67  selector->Emit(opcode, g.DefineAsRegister(node),
68                 g.UseRegister(node->InputAt(0)),
69                 g.UseRegister(node->InputAt(1)));
70}
71
72
73static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
74                    Node* node) {
75  MipsOperandGenerator g(selector);
76  selector->Emit(opcode, g.DefineAsRegister(node),
77                 g.UseRegister(node->InputAt(0)));
78}
79
80
81static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
82                     Node* node) {
83  MipsOperandGenerator g(selector);
84  selector->Emit(opcode, g.DefineAsRegister(node),
85                 g.UseRegister(node->InputAt(0)),
86                 g.UseOperand(node->InputAt(1), opcode));
87}
88
89
// Shared lowering for binary operations: register left operand, register-or-
// immediate right operand, with the result threaded through the flags
// continuation (branch / set / deoptimize variants).
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[4];
  size_t input_count = 0;
  InstructionOperand outputs[2];
  size_t output_count = 0;

  inputs[input_count++] = g.UseRegister(m.left().node());
  // Right operand may be folded into the immediate field of |opcode|.
  inputs[input_count++] = g.UseOperand(m.right().node(), opcode);

  if (cont->IsBranch()) {
    // Branch continuations carry the two targets as extra inputs.
    inputs[input_count++] = g.Label(cont->true_block());
    inputs[input_count++] = g.Label(cont->false_block());
  }

  outputs[output_count++] = g.DefineAsRegister(node);
  if (cont->IsSet()) {
    // "Set" continuations also materialize the comparison result.
    outputs[output_count++] = g.DefineAsRegister(cont->result());
  }

  DCHECK_NE(0u, input_count);
  DCHECK_NE(0u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  // Fold the continuation kind into the opcode before emitting.
  opcode = cont->Encode(opcode);
  if (cont->IsDeoptimize()) {
    // Deopt continuations must attach the frame state to the instruction.
    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
                             cont->frame_state());
  } else {
    selector->Emit(opcode, output_count, outputs, input_count, inputs);
  }
}
125
126
127static void VisitBinop(InstructionSelector* selector, Node* node,
128                       InstructionCode opcode) {
129  FlagsContinuation cont;
130  VisitBinop(selector, node, opcode, &cont);
131}
132
133
// Lowers a Load node to a MIPS load, selecting the opcode from the machine
// representation and folding a constant index into the immediate when it fits.
void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMipsLwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMipsLdc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Sign- vs zero-extend according to the load representation.
      opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
      break;
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord32:
      opcode = kMipsLw;
      break;
    case MachineRepresentation::kWord64:   // Fall through.
    case MachineRepresentation::kSimd128:  // Fall through.
    case MachineRepresentation::kNone:
      // Not representable as a single load on 32-bit MIPS.
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    // base-register + immediate-offset addressing.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    // Materialize base + index into a temp register, then load with offset 0.
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
178
179
// Lowers a Store node. Stores needing a write barrier go through the generic
// kArchStoreWithWriteBarrier; plain stores pick a MIPS store opcode by
// representation and fold a constant index into the immediate when possible.
void InstructionSelector::VisitStore(Node* node) {
  MipsOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(mips): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier) {
    // Write barriers only apply to tagged values.
    DCHECK_EQ(MachineRepresentation::kTagged, rep);
    InstructionOperand inputs[3];
    size_t input_count = 0;
    // Unique registers: the barrier stub must not alias its operands.
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
    switch (write_barrier_kind) {
      case kNoWriteBarrier:
        UNREACHABLE();
        break;
      case kMapWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsMap;
        break;
      case kPointerWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsPointer;
        break;
      case kFullWriteBarrier:
        record_write_mode = RecordWriteMode::kValueIsAny;
        break;
    }
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode = kArchNop;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kMipsSwc1;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kMipsSdc1;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kMipsSb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kMipsSh;
        break;
      case MachineRepresentation::kTagged:  // Fall through.
      case MachineRepresentation::kWord32:
        opcode = kMipsSw;
        break;
      case MachineRepresentation::kWord64:   // Fall through.
      case MachineRepresentation::kSimd128:  // Fall through.
      case MachineRepresentation::kNone:
        // Not representable as a single store on 32-bit MIPS.
        UNREACHABLE();
        return;
    }

    if (g.CanBeImmediate(index, opcode)) {
      // base-register + immediate-offset addressing.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
    } else {
      // Materialize base + index into a temp, then store with offset 0.
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegister(value));
    }
  }
}
258
259
// Lowers Word32And, matching bit-field patterns onto Ext (extract) and Ins
// (insert) before falling back to a plain And.
void InstructionSelector::VisitWord32And(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    uint32_t mask_width = base::bits::CountPopulation32(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().Value() & 0x1f;

        // Ext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Ext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kMipsExt, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasValue()) {
    uint32_t mask = m.right().Value();
    // ~mask contiguous in the low bits means the And just clears those bits.
    uint32_t shift = base::bits::CountPopulation32(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of invereted mask.
      Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
           g.TempImmediate(0), g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kMipsAnd);
}
307
308
// Lowers Word32Or via the shared binop helper; the right operand may be
// folded into the immediate field.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kMipsOr);
}
312
313
// Lowers Word32Xor, strength-reducing xor-with-minus-one into Nor so that no
// constant has to be materialized.
void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  // Match Xor(Or(a, b), -1) => Nor(a, b) when the Or has no constant operand.
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasValue()) {
      MipsOperandGenerator g(this);
      Emit(kMipsNor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    MipsOperandGenerator g(this);
    Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kMipsXor);
}
336
337
// Lowers Word32Shl; when shifting an And whose contiguous mask reaches the
// top bit after the shift, the And is redundant and only the shift is kept.
void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    MipsOperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasValue()) {
      uint32_t mask = mleft.right().Value();
      uint32_t mask_width = base::bits::CountPopulation32(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().Value();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kMipsShl, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kMipsShl, node);
}
367
368
// Lowers Word32Shr, matching Shr(And(x, mask), imm) onto a single Ext
// (bit-field extract) when the masked field lands in the low bits.
void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasValue()) {
    // int32 shifts only use the low 5 bits of the shift amount.
    uint32_t lsb = m.right().Value() & 0x1f;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
      // shifted into the least-significant bits.
      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation32(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        MipsOperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kMipsExt, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kMipsShr, node);
}
392
393
394void InstructionSelector::VisitWord32Sar(Node* node) {
395  VisitRRO(this, kMipsSar, node);
396}
397
// Shared lowering for 64-bit operations expressed as word pairs on 32-bit
// MIPS: four word inputs (low0, high0, low1, high1) and two word outputs.
static void VisitInt32PairBinop(InstructionSelector* selector,
                                InstructionCode opcode, Node* node) {
  MipsOperandGenerator g(selector);

  // We use UseUniqueRegister here to avoid register sharing with the output
  // register.
  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 g.UseUniqueRegister(node->InputAt(2)),
                                 g.UseUniqueRegister(node->InputAt(3))};

  // NOTE(review): FindProjection(node, 1) is assumed non-null, i.e. the high
  // word is always consumed — confirm no caller drops projection 1.
  InstructionOperand outputs[] = {
      g.DefineAsRegister(node),
      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
  selector->Emit(opcode, 2, outputs, 4, inputs);
}
414
// 64-bit add expressed as a word pair; delegates to the shared pair helper.
void InstructionSelector::VisitInt32PairAdd(Node* node) {
  VisitInt32PairBinop(this, kMipsAddPair, node);
}
418
// 64-bit subtract expressed as a word pair; delegates to the pair helper.
void InstructionSelector::VisitInt32PairSub(Node* node) {
  VisitInt32PairBinop(this, kMipsSubPair, node);
}
422
// 64-bit multiply expressed as a word pair; delegates to the pair helper.
void InstructionSelector::VisitInt32PairMul(Node* node) {
  VisitInt32PairBinop(this, kMipsMulPair, node);
}
426
// Shared routine for multiple shift operations.
// Inputs are the (low, high) word pair plus a shift amount, which is used as
// an immediate when constant; outputs are the shifted (low, high) pair.
static void VisitWord32PairShift(InstructionSelector* selector,
                                 InstructionCode opcode, Node* node) {
  MipsOperandGenerator g(selector);
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  // We use UseUniqueRegister here to avoid register sharing with the output
  // register.
  InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  // NOTE(review): FindProjection(node, 1) is assumed non-null, i.e. the high
  // word is always consumed — confirm no caller drops projection 1.
  InstructionOperand outputs[] = {
      g.DefineAsRegister(node),
      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

  selector->Emit(opcode, 2, outputs, 3, inputs);
}
451
// 64-bit shift left expressed as a word pair; delegates to the shift helper.
void InstructionSelector::VisitWord32PairShl(Node* node) {
  VisitWord32PairShift(this, kMipsShlPair, node);
}
455
// 64-bit logical shift right as a word pair; delegates to the shift helper.
void InstructionSelector::VisitWord32PairShr(Node* node) {
  VisitWord32PairShift(this, kMipsShrPair, node);
}
459
// 64-bit arithmetic shift right as a word pair; delegates to the helper.
void InstructionSelector::VisitWord32PairSar(Node* node) {
  VisitWord32PairShift(this, kMipsSarPair, node);
}
463
464void InstructionSelector::VisitWord32Ror(Node* node) {
465  VisitRRO(this, kMipsRor, node);
466}
467
468
469void InstructionSelector::VisitWord32Clz(Node* node) {
470  VisitRR(this, kMipsClz, node);
471}
472
473
// No lowering for Word32ReverseBits here; presumably the operation is
// disabled for this architecture in the machine-operator config — verify.
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
475
476
477void InstructionSelector::VisitWord32Ctz(Node* node) {
478  MipsOperandGenerator g(this);
479  Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
480}
481
482
483void InstructionSelector::VisitWord32Popcnt(Node* node) {
484  MipsOperandGenerator g(this);
485  Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
486}
487
488
// Lowers Int32Add, matching add-of-shift on either side onto Lsa
// (left-shift-add) before falling back to a plain add.
void InstructionSelector::VisitInt32Add(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Lsa for (left + (left_of_right << imm)).
  if (m.right().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().HasValue()) {
      // NOTE(review): shift_value is not range-checked here — confirm the
      // kMipsLsa lowering accepts every constant shift (including 0).
      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
      return;
    }
  }

  // Select Lsa for ((left_of_left << imm) + right).
  if (m.left().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
           g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
      return;
    }
  }

  VisitBinop(this, node, kMipsAdd);
}
519
520
// Lowers Int32Sub via the shared binop helper; the right operand may be
// folded into the immediate field.
void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMipsSub);
}
524
525
// Lowers Int32Mul, strength-reducing multiplication by positive constants:
//   2^k     => shift left by k
//   2^k + 1 => Lsa (left + (left << k))
//   2^k - 1 => (left << k) - left
void InstructionSelector::VisitInt32Mul(Node* node) {
  MipsOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (base::bits::IsPowerOfTwo32(value)) {
      Emit(kMipsShl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value - 1)) {
      Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo32(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMipsSub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  VisitRRR(this, kMipsMul, node);
}
555
556
557void InstructionSelector::VisitInt32MulHigh(Node* node) {
558  VisitRRR(this, kMipsMulHigh, node);
559}
560
561
562void InstructionSelector::VisitUint32MulHigh(Node* node) {
563  MipsOperandGenerator g(this);
564  Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
565       g.UseRegister(node->InputAt(1)));
566}
567
568
569void InstructionSelector::VisitInt32Div(Node* node) {
570  MipsOperandGenerator g(this);
571  Int32BinopMatcher m(node);
572  Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
573       g.UseRegister(m.right().node()));
574}
575
576
577void InstructionSelector::VisitUint32Div(Node* node) {
578  MipsOperandGenerator g(this);
579  Int32BinopMatcher m(node);
580  Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
581       g.UseRegister(m.right().node()));
582}
583
584
585void InstructionSelector::VisitInt32Mod(Node* node) {
586  MipsOperandGenerator g(this);
587  Int32BinopMatcher m(node);
588  Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
589       g.UseRegister(m.right().node()));
590}
591
592
593void InstructionSelector::VisitUint32Mod(Node* node) {
594  MipsOperandGenerator g(this);
595  Int32BinopMatcher m(node);
596  Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
597       g.UseRegister(m.right().node()));
598}
599
600
601void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
602  VisitRR(this, kMipsCvtDS, node);
603}
604
605
606void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
607  VisitRR(this, kMipsCvtSW, node);
608}
609
610
611void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
612  VisitRR(this, kMipsCvtSUw, node);
613}
614
615
616void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
617  VisitRR(this, kMipsCvtDW, node);
618}
619
620
621void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
622  VisitRR(this, kMipsCvtDUw, node);
623}
624
625
626void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
627  VisitRR(this, kMipsTruncWS, node);
628}
629
630
631void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
632  VisitRR(this, kMipsTruncUwS, node);
633}
634
635
// Lowers ChangeFloat64ToInt32, folding a covered rounding operation (and,
// one level deeper, a float32->float64 widening) into the conversion.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  MipsOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMipsFloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMipsCeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMipsRoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMipsTruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMipsFloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMipsCeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMipsRoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            // No rounding op: truncate the float32 input directly.
            Emit(kMipsTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMipsTruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kMipsTruncWD, node);
}
698
699
700void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
701  VisitRR(this, kMipsTruncUwD, node);
702}
703
704void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
705  VisitRR(this, kMipsTruncUwD, node);
706}
707
// Lowers TruncateFloat64ToFloat32; an int32 -> float64 -> float32 chain is
// collapsed into a single int32 -> float32 conversion.
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  MipsOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
  // instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    Emit(kMipsCvtSW, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  VisitRR(this, kMipsCvtSD, node);
}
721
722void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
723  VisitRR(this, kArchTruncateDoubleToI, node);
724}
725
726void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
727  VisitRR(this, kMipsTruncWD, node);
728}
729
730void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
731  VisitRR(this, kMipsFloat64ExtractLowWord32, node);
732}
733
734
// Reinterpret int32 bits as a float32 by inserting into the low FP word.
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  MipsOperandGenerator g(this);
  // NOTE(review): constructs an inline ImmediateOperand directly instead of
  // g.TempImmediate(0) as sibling code does — presumably equivalent; verify.
  Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}
741
742
743void InstructionSelector::VisitFloat32Add(Node* node) {
744  VisitRRR(this, kMipsAddS, node);
745}
746
747
748void InstructionSelector::VisitFloat64Add(Node* node) {
749  VisitRRR(this, kMipsAddD, node);
750}
751
752
753void InstructionSelector::VisitFloat32Sub(Node* node) {
754  VisitRRR(this, kMipsSubS, node);
755}
756
757void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
758  VisitRRR(this, kMipsSubPreserveNanS, node);
759}
760
// Lowers Float64Sub, matching the ceiling idiom
// (-0.0 - RoundDown(-0.0 - x)) onto a single Float64RoundUp.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  MipsOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
      CanCover(m.node(), m.right().node())) {
    if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
        CanCover(m.right().node(), m.right().InputAt(0))) {
      Float64BinopMatcher mright0(m.right().InputAt(0));
      if (mright0.left().IsMinusZero()) {
        // -0.0 - RoundDown(-0.0 - x) == RoundUp(x).
        Emit(kMipsFloat64RoundUp, g.DefineAsRegister(node),
             g.UseRegister(mright0.right().node()));
        return;
      }
    }
  }
  VisitRRR(this, kMipsSubD, node);
}
778
779void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
780  VisitRRR(this, kMipsSubPreserveNanD, node);
781}
782
783void InstructionSelector::VisitFloat32Mul(Node* node) {
784  VisitRRR(this, kMipsMulS, node);
785}
786
787
788void InstructionSelector::VisitFloat64Mul(Node* node) {
789  VisitRRR(this, kMipsMulD, node);
790}
791
792
793void InstructionSelector::VisitFloat32Div(Node* node) {
794  VisitRRR(this, kMipsDivS, node);
795}
796
797
798void InstructionSelector::VisitFloat64Div(Node* node) {
799  VisitRRR(this, kMipsDivD, node);
800}
801
802
// Float64 modulus is emitted as a call (MarkAsCall) with fixed FP registers:
// arguments in f12 and f14, result in f0.
// NOTE(review): register choice presumably follows the MIPS calling
// convention — verify against the code generator for kMipsModD.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}
808
809
// Lowers Float32Max; the r6 variant takes a three-operand form with unique
// input registers, while older variants reuse the right operand's register.
void InstructionSelector::VisitFloat32Max(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r6)) {
    Emit(kMipsFloat32Max, g.DefineAsRegister(node),
         g.UseUniqueRegister(node->InputAt(0)),
         g.UseUniqueRegister(node->InputAt(1)));

  } else {
    // Reverse operands, and use same reg. for result and right operand.
    Emit(kMipsFloat32Max, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
  }
}
823
824
// Lowers Float64Max; the r6 variant takes a three-operand form with unique
// input registers, while older variants reuse the right operand's register.
void InstructionSelector::VisitFloat64Max(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r6)) {
    Emit(kMipsFloat64Max, g.DefineAsRegister(node),
         g.UseUniqueRegister(node->InputAt(0)),
         g.UseUniqueRegister(node->InputAt(1)));

  } else {
    // Reverse operands, and use same reg. for result and right operand.
    Emit(kMipsFloat64Max, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
  }
}
838
839
// Lowers Float32Min; the r6 variant takes a three-operand form with unique
// input registers, while older variants reuse the right operand's register.
void InstructionSelector::VisitFloat32Min(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r6)) {
    Emit(kMipsFloat32Min, g.DefineAsRegister(node),
         g.UseUniqueRegister(node->InputAt(0)),
         g.UseUniqueRegister(node->InputAt(1)));

  } else {
    // Reverse operands, and use same reg. for result and right operand.
    Emit(kMipsFloat32Min, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
  }
}
853
854
// Lowers Float64Min; the r6 variant takes a three-operand form with unique
// input registers, while older variants reuse the right operand's register.
void InstructionSelector::VisitFloat64Min(Node* node) {
  MipsOperandGenerator g(this);
  if (IsMipsArchVariant(kMips32r6)) {
    Emit(kMipsFloat64Min, g.DefineAsRegister(node),
         g.UseUniqueRegister(node->InputAt(0)),
         g.UseUniqueRegister(node->InputAt(1)));

  } else {
    // Reverse operands, and use same reg. for result and right operand.
    Emit(kMipsFloat64Min, g.DefineSameAsFirst(node),
         g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
  }
}
868
869
// Unary floating-point operations that map one-to-one onto a MIPS opcode;
// each delegates to the shared VisitRR helper.

void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMipsAbsS, node);
}


void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMipsAbsD, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtS, node);
}


void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMipsSqrtD, node);
}


void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMipsFloat32RoundDown, node);
}


void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMipsFloat64RoundDown, node);
}


void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMipsFloat32RoundUp, node);
}


void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMipsFloat64RoundUp, node);
}


void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat32RoundTruncate, node);
}


void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMipsFloat64RoundTruncate, node);
}


// Never reached: kFloat64RoundTiesAway is not advertised in
// SupportedMachineOperatorFlags(), so the operator is never selected.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}


void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat32RoundTiesEven, node);
}


void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMipsFloat64RoundTiesEven, node);
}

// Float negation operators are likewise not advertised by this selector,
// so these visitors can never be reached.
void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
936
937void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
938                                                   InstructionCode opcode) {
939  MipsOperandGenerator g(this);
940  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
941       g.UseFixed(node->InputAt(1), f14))
942      ->MarkAsCall();
943}
944
945void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
946                                                  InstructionCode opcode) {
947  MipsOperandGenerator g(this);
948  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
949      ->MarkAsCall();
950}
951
952void InstructionSelector::EmitPrepareArguments(
953    ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
954    Node* node) {
955  MipsOperandGenerator g(this);
956
957  // Prepare for C function call.
958  if (descriptor->IsCFunctionCall()) {
959    Emit(kArchPrepareCallCFunction |
960             MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
961         0, nullptr, 0, nullptr);
962
963    // Poke any stack arguments.
964    int slot = kCArgSlotCount;
965    for (PushParameter input : (*arguments)) {
966      if (input.node()) {
967        Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
968             g.TempImmediate(slot << kPointerSizeLog2));
969        ++slot;
970      }
971    }
972  } else {
973    // Possibly align stack here for functions.
974    int push_count = static_cast<int>(descriptor->StackParameterCount());
975    if (push_count > 0) {
976      Emit(kMipsStackClaim, g.NoOutput(),
977           g.TempImmediate(push_count << kPointerSizeLog2));
978    }
979    for (size_t n = 0; n < arguments->size(); ++n) {
980      PushParameter input = (*arguments)[n];
981      if (input.node()) {
982        Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
983             g.TempImmediate(n << kPointerSizeLog2));
984      }
985    }
986  }
987}
988
989
// Tail-call target addresses cannot be encoded as immediates on MIPS; they
// are always materialized in a register.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

// Number of temporary registers reserved for a tail call from a JSFunction.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
993
994void InstructionSelector::VisitCheckedLoad(Node* node) {
995  CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
996  MipsOperandGenerator g(this);
997  Node* const buffer = node->InputAt(0);
998  Node* const offset = node->InputAt(1);
999  Node* const length = node->InputAt(2);
1000  ArchOpcode opcode = kArchNop;
1001  switch (load_rep.representation()) {
1002    case MachineRepresentation::kWord8:
1003      opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
1004      break;
1005    case MachineRepresentation::kWord16:
1006      opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
1007      break;
1008    case MachineRepresentation::kWord32:
1009      opcode = kCheckedLoadWord32;
1010      break;
1011    case MachineRepresentation::kFloat32:
1012      opcode = kCheckedLoadFloat32;
1013      break;
1014    case MachineRepresentation::kFloat64:
1015      opcode = kCheckedLoadFloat64;
1016      break;
1017    case MachineRepresentation::kBit:      // Fall through.
1018    case MachineRepresentation::kTagged:   // Fall through.
1019    case MachineRepresentation::kWord64:   // Fall through.
1020    case MachineRepresentation::kSimd128:  // Fall through.
1021    case MachineRepresentation::kNone:
1022      UNREACHABLE();
1023      return;
1024  }
1025  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
1026                                          ? g.UseImmediate(offset)
1027                                          : g.UseRegister(offset);
1028
1029  InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
1030                                          ? g.CanBeImmediate(length, opcode)
1031                                                ? g.UseImmediate(length)
1032                                                : g.UseRegister(length)
1033                                          : g.UseRegister(length);
1034
1035  Emit(opcode | AddressingModeField::encode(kMode_MRI),
1036       g.DefineAsRegister(node), offset_operand, length_operand,
1037       g.UseRegister(buffer));
1038}
1039
1040
1041void InstructionSelector::VisitCheckedStore(Node* node) {
1042  MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
1043  MipsOperandGenerator g(this);
1044  Node* const buffer = node->InputAt(0);
1045  Node* const offset = node->InputAt(1);
1046  Node* const length = node->InputAt(2);
1047  Node* const value = node->InputAt(3);
1048  ArchOpcode opcode = kArchNop;
1049  switch (rep) {
1050    case MachineRepresentation::kWord8:
1051      opcode = kCheckedStoreWord8;
1052      break;
1053    case MachineRepresentation::kWord16:
1054      opcode = kCheckedStoreWord16;
1055      break;
1056    case MachineRepresentation::kWord32:
1057      opcode = kCheckedStoreWord32;
1058      break;
1059    case MachineRepresentation::kFloat32:
1060      opcode = kCheckedStoreFloat32;
1061      break;
1062    case MachineRepresentation::kFloat64:
1063      opcode = kCheckedStoreFloat64;
1064      break;
1065    default:
1066      UNREACHABLE();
1067      return;
1068  }
1069  InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
1070                                          ? g.UseImmediate(offset)
1071                                          : g.UseRegister(offset);
1072
1073  InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
1074                                          ? g.CanBeImmediate(length, opcode)
1075                                                ? g.UseImmediate(length)
1076                                                : g.UseRegister(length)
1077                                          : g.UseRegister(length);
1078
1079  Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1080       offset_operand, length_operand, g.UseRegister(value),
1081       g.UseRegister(buffer));
1082}
1083
1084
namespace {
// Shared routine for multiple compare operations.
// Emits the encoded comparison in the shape required by the continuation:
// a two-way branch, a deoptimization check, or a materialized boolean.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  // Folds the continuation's condition code into the opcode.
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), left, right,
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
                             cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple float32 compare operations.
// A literal-zero operand is passed as an immediate; presumably the code
// generator special-cases comparisons against 0.0 — confirm against the
// MIPS code generator.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  Float32BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMipsCmpS, lhs, rhs, cont);
}


// Shared routine for multiple float64 compare operations.
// Same zero-immediate treatment as the float32 variant above.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  MipsOperandGenerator g(selector);
  Float64BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kMipsCmpD, lhs, rhs, cont);
}


// Shared routine for multiple word compare operations.
// Tries to encode a constant operand (on either side) as an immediate; when
// the constant is on the left, the continuation's condition is commuted for
// non-commutative operations so the operands can be swapped.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  MipsOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    switch (cont->condition()) {
      case kEqual:
      case kNotEqual:
        // NOTE(review): for (in)equality the immediate form is only used when
        // materializing a boolean; branches/deopts keep both operands in
        // registers — presumably required by the branch pseudo-instructions.
        if (cont->IsSet()) {
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
        } else {
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
        }
        break;
      case kSignedLessThan:
      case kSignedGreaterThanOrEqual:
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
        VisitCompare(selector, opcode, g.UseRegister(left),
                     g.UseImmediate(right), cont);
        break;
      default:
        VisitCompare(selector, opcode, g.UseRegister(left),
                     g.UseRegister(right), cont);
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    if (!commutative) cont->Commute();
    switch (cont->condition()) {
      case kEqual:
      case kNotEqual:
        if (cont->IsSet()) {
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
        } else {
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
        }
        break;
      case kSignedLessThan:
      case kSignedGreaterThanOrEqual:
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
        VisitCompare(selector, opcode, g.UseRegister(right),
                     g.UseImmediate(left), cont);
        break;
      default:
        VisitCompare(selector, opcode, g.UseRegister(right),
                     g.UseRegister(left), cont);
    }
  } else {
    // Neither side fits an immediate: plain register/register compare.
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}


// Convenience overload for an ordinary (non-commutative) word compare.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMipsCmp, cont, false);
}

// Shared routine for word comparisons against zero.
// Walks backwards through covered nodes, fusing comparisons (and overflow
// projections) into the continuation where possible; falls back to an
// explicit compare against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
                          Node* value, FlagsContinuation* cont) {
  while (selector->CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal: {
        // Combine with comparisons against 0 by simply inverting the
        // continuation.
        Int32BinopMatcher m(value);
        if (m.right().Is(0)) {
          user = value;
          value = m.left().node();
          cont->Negate();
          continue;
        }
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWordCompare(selector, value, cont);
      }
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWordCompare(selector, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThan:
        // Float comparisons reuse the unsigned condition codes.
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(selector, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(selector, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (!result || selector->IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsAddOvf, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(selector, node, kMipsSubOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
        // (x & mask) != 0 can be emitted as a test instruction; the And is
        // commutative, so immediates may match on either side.
        return VisitWordCompare(selector, value, kMipsTst, cont, true);
      default:
        break;
    }
    break;
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  MipsOperandGenerator g(selector);
  InstructionCode const opcode = cont->Encode(kMipsCmp);
  InstructionOperand const value_operand = g.UseRegister(value);
  if (cont->IsBranch()) {
    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
                   g.Label(cont->true_block()), g.Label(cont->false_block()));
  } else if (cont->IsDeoptimize()) {
    selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
                             g.TempImmediate(0), cont->frame_state());
  } else {
    DCHECK(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                   g.TempImmediate(0));
  }
}

}  // namespace
1302
// Branches and deoptimization checks are all lowered through
// VisitWordCompareZero, which fuses the condition into the preceding
// comparison where possible.
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}

// Deoptimizes when the condition input is non-zero.
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}

// Deoptimizes when the condition input is zero.
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
1320
1321void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
1322  MipsOperandGenerator g(this);
1323  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
1324
1325  // Emit either ArchTableSwitch or ArchLookupSwitch.
1326  size_t table_space_cost = 9 + sw.value_range;
1327  size_t table_time_cost = 3;
1328  size_t lookup_space_cost = 2 + 2 * sw.case_count;
1329  size_t lookup_time_cost = sw.case_count;
1330  if (sw.case_count > 0 &&
1331      table_space_cost + 3 * table_time_cost <=
1332          lookup_space_cost + 3 * lookup_time_cost &&
1333      sw.min_value > std::numeric_limits<int32_t>::min()) {
1334    InstructionOperand index_operand = value_operand;
1335    if (sw.min_value) {
1336      index_operand = g.TempRegister();
1337      Emit(kMipsSub, index_operand, value_operand,
1338           g.TempImmediate(sw.min_value));
1339    }
1340    // Generate a table lookup.
1341    return EmitTableSwitch(sw, index_operand);
1342  }
1343
1344  // Generate a sequence of conditional jumps.
1345  return EmitLookupSwitch(sw, value_operand);
1346}
1347
1348
1349void InstructionSelector::VisitWord32Equal(Node* const node) {
1350  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
1351  Int32BinopMatcher m(node);
1352  if (m.right().Is(0)) {
1353    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
1354  }
1355  VisitWordCompare(this, node, &cont);
1356}
1357
1358
// Integer comparison operators that materialize a boolean result; each sets
// up the appropriate condition and defers to the shared word-compare helper.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWordCompare(this, node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWordCompare(this, node, &cont);
}
1383
1384
1385void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
1386  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1387    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1388    return VisitBinop(this, node, kMipsAddOvf, &cont);
1389  }
1390  FlagsContinuation cont;
1391  VisitBinop(this, node, kMipsAddOvf, &cont);
1392}
1393
1394
1395void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
1396  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
1397    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
1398    return VisitBinop(this, node, kMipsSubOvf, &cont);
1399  }
1400  FlagsContinuation cont;
1401  VisitBinop(this, node, kMipsSubOvf, &cont);
1402}
1403
1404
// Floating-point comparison operators that materialize a boolean result.
// The less-than variants reuse the unsigned condition codes.
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
1441
1442
// Extracts the low 32 bits of a float64 into a general-purpose register.
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


// Extracts the high 32 bits of a float64 into a general-purpose register.
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  MipsOperandGenerator g(this);
  Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
1455
1456
// Replaces the low 32 bits of a float64 (input 0) with a word32 (input 1).
// The update is done in place, so the result shares input 0's register.
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  MipsOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}


// Replaces the high 32 bits of a float64 (input 0) with a word32 (input 1).
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  MipsOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}
1473
1474void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
1475  MipsOperandGenerator g(this);
1476  Node* left = node->InputAt(0);
1477  InstructionOperand temps[] = {g.TempRegister()};
1478  Emit(kMipsFloat64SilenceNaN, g.DefineSameAsFirst(node), g.UseRegister(left),
1479       arraysize(temps), temps);
1480}
1481
1482void InstructionSelector::VisitAtomicLoad(Node* node) {
1483  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
1484  MipsOperandGenerator g(this);
1485  Node* base = node->InputAt(0);
1486  Node* index = node->InputAt(1);
1487  ArchOpcode opcode = kArchNop;
1488  switch (load_rep.representation()) {
1489    case MachineRepresentation::kWord8:
1490      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
1491      break;
1492    case MachineRepresentation::kWord16:
1493      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
1494      break;
1495    case MachineRepresentation::kWord32:
1496      opcode = kAtomicLoadWord32;
1497      break;
1498    default:
1499      UNREACHABLE();
1500      return;
1501  }
1502  if (g.CanBeImmediate(index, opcode)) {
1503    Emit(opcode | AddressingModeField::encode(kMode_MRI),
1504         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
1505  } else {
1506    InstructionOperand addr_reg = g.TempRegister();
1507    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
1508         g.UseRegister(index), g.UseRegister(base));
1509    // Emit desired load opcode, using temp addr_reg.
1510    Emit(opcode | AddressingModeField::encode(kMode_MRI),
1511         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
1512  }
1513}
1514
1515void InstructionSelector::VisitAtomicStore(Node* node) {
1516  MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
1517  MipsOperandGenerator g(this);
1518  Node* base = node->InputAt(0);
1519  Node* index = node->InputAt(1);
1520  Node* value = node->InputAt(2);
1521  ArchOpcode opcode = kArchNop;
1522  switch (rep) {
1523    case MachineRepresentation::kWord8:
1524      opcode = kAtomicStoreWord8;
1525      break;
1526    case MachineRepresentation::kWord16:
1527      opcode = kAtomicStoreWord16;
1528      break;
1529    case MachineRepresentation::kWord32:
1530      opcode = kAtomicStoreWord32;
1531      break;
1532    default:
1533      UNREACHABLE();
1534      return;
1535  }
1536
1537  if (g.CanBeImmediate(index, opcode)) {
1538    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1539         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
1540  } else {
1541    InstructionOperand addr_reg = g.TempRegister();
1542    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
1543         g.UseRegister(index), g.UseRegister(base));
1544    // Emit desired store opcode, using temp addr_reg.
1545    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1546         addr_reg, g.TempImmediate(0), g.UseRegister(value));
1547  }
1548}
1549
1550// static
1551MachineOperatorBuilder::Flags
1552InstructionSelector::SupportedMachineOperatorFlags() {
1553  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
1554  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
1555      IsFp64Mode()) {
1556    flags |= MachineOperatorBuilder::kFloat64RoundDown |
1557             MachineOperatorBuilder::kFloat64RoundUp |
1558             MachineOperatorBuilder::kFloat64RoundTruncate |
1559             MachineOperatorBuilder::kFloat64RoundTiesEven;
1560  }
1561  return flags | MachineOperatorBuilder::kWord32Ctz |
1562         MachineOperatorBuilder::kWord32Popcnt |
1563         MachineOperatorBuilder::kInt32DivIsSafe |
1564         MachineOperatorBuilder::kUint32DivIsSafe |
1565         MachineOperatorBuilder::kWord32ShiftIsSafe |
1566         MachineOperatorBuilder::kFloat64Min |
1567         MachineOperatorBuilder::kFloat64Max |
1568         MachineOperatorBuilder::kFloat32Min |
1569         MachineOperatorBuilder::kFloat32Max |
1570         MachineOperatorBuilder::kFloat32RoundDown |
1571         MachineOperatorBuilder::kFloat32RoundUp |
1572         MachineOperatorBuilder::kFloat32RoundTruncate |
1573         MachineOperatorBuilder::kFloat32RoundTiesEven;
1574}
1575
1576// static
1577MachineOperatorBuilder::AlignmentRequirements
1578InstructionSelector::AlignmentRequirements() {
1579  if (IsMipsArchVariant(kMips32r6)) {
1580    return MachineOperatorBuilder::AlignmentRequirements::
1581        FullUnalignedAccessSupport();
1582  } else {
1583    DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1584           IsMipsArchVariant(kMips32r2));
1585    return MachineOperatorBuilder::AlignmentRequirements::
1586        NoUnalignedAccessSupport();
1587  }
1588}
1589
1590}  // namespace compiler
1591}  // namespace internal
1592}  // namespace v8
1593