MipsSEISelLowering.cpp revision d2a31a124f3bebbdfc4d886afe33a116893aa689
1//===-- MipsSEISelLowering.cpp - MipsSE DAG Lowering Interface --*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Subclass of MipsTargetLowering specialized for mips32/64.
11//
12//===----------------------------------------------------------------------===//
13#include "MipsSEISelLowering.h"
14#include "MipsRegisterInfo.h"
15#include "MipsTargetMachine.h"
16#include "llvm/CodeGen/MachineInstrBuilder.h"
17#include "llvm/CodeGen/MachineRegisterInfo.h"
18#include "llvm/IR/Intrinsics.h"
19#include "llvm/Support/CommandLine.h"
20#include "llvm/Target/TargetInstrInfo.h"
21
22using namespace llvm;
23
24static cl::opt<bool>
25EnableMipsTailCalls("enable-mips-tail-calls", cl::Hidden,
26                    cl::desc("MIPS: Enable tail calls."), cl::init(false));
27
28static cl::opt<bool> NoDPLoadStore("mno-ldc1-sdc1", cl::init(false),
29                                   cl::desc("Expand double precision loads and "
30                                            "stores to their single precision "
31                                            "counterparts"));
32
33MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
34  : MipsTargetLowering(TM) {
35  // Set up the register classes
36
37  clearRegisterClasses();
38
39  addRegisterClass(MVT::i32, &Mips::GPR32RegClass);
40
41  if (HasMips64)
42    addRegisterClass(MVT::i64, &Mips::GPR64RegClass);
43
44  if (Subtarget->hasDSP() || Subtarget->hasMSA()) {
45    // Expand all truncating stores and extending loads.
46    unsigned FirstVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
47    unsigned LastVT = (unsigned)MVT::LAST_VECTOR_VALUETYPE;
48
49    for (unsigned VT0 = FirstVT; VT0 <= LastVT; ++VT0) {
50      for (unsigned VT1 = FirstVT; VT1 <= LastVT; ++VT1)
51        setTruncStoreAction((MVT::SimpleValueType)VT0,
52                            (MVT::SimpleValueType)VT1, Expand);
53
54      setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
55      setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
56      setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT0, Expand);
57    }
58  }
59
60  if (Subtarget->hasDSP()) {
61    MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};
62
63    for (unsigned i = 0; i < array_lengthof(VecTys); ++i) {
64      addRegisterClass(VecTys[i], &Mips::DSPRRegClass);
65
66      // Expand all builtin opcodes.
67      for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
68        setOperationAction(Opc, VecTys[i], Expand);
69
70      setOperationAction(ISD::ADD, VecTys[i], Legal);
71      setOperationAction(ISD::SUB, VecTys[i], Legal);
72      setOperationAction(ISD::LOAD, VecTys[i], Legal);
73      setOperationAction(ISD::STORE, VecTys[i], Legal);
74      setOperationAction(ISD::BITCAST, VecTys[i], Legal);
75    }
76
77    setTargetDAGCombine(ISD::SHL);
78    setTargetDAGCombine(ISD::SRA);
79    setTargetDAGCombine(ISD::SRL);
80    setTargetDAGCombine(ISD::SETCC);
81    setTargetDAGCombine(ISD::VSELECT);
82  }
83
84  if (Subtarget->hasDSPR2())
85    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
86
87  if (Subtarget->hasMSA()) {
88    addMSAIntType(MVT::v16i8, &Mips::MSA128BRegClass);
89    addMSAIntType(MVT::v8i16, &Mips::MSA128HRegClass);
90    addMSAIntType(MVT::v4i32, &Mips::MSA128WRegClass);
91    addMSAIntType(MVT::v2i64, &Mips::MSA128DRegClass);
92    addMSAFloatType(MVT::v8f16, &Mips::MSA128HRegClass);
93    addMSAFloatType(MVT::v4f32, &Mips::MSA128WRegClass);
94    addMSAFloatType(MVT::v2f64, &Mips::MSA128DRegClass);
95
96    setTargetDAGCombine(ISD::AND);
97    setTargetDAGCombine(ISD::SRA);
98    setTargetDAGCombine(ISD::VSELECT);
99    setTargetDAGCombine(ISD::XOR);
100  }
101
102  if (!Subtarget->mipsSEUsesSoftFloat()) {
103    addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
104
105    // When dealing with single precision only, use libcalls
106    if (!Subtarget->isSingleFloat()) {
107      if (Subtarget->isFP64bit())
108        addRegisterClass(MVT::f64, &Mips::FGR64RegClass);
109      else
110        addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);
111    }
112  }
113
114  setOperationAction(ISD::SMUL_LOHI,          MVT::i32, Custom);
115  setOperationAction(ISD::UMUL_LOHI,          MVT::i32, Custom);
116  setOperationAction(ISD::MULHS,              MVT::i32, Custom);
117  setOperationAction(ISD::MULHU,              MVT::i32, Custom);
118
119  if (HasMips64) {
120    setOperationAction(ISD::MULHS,            MVT::i64, Custom);
121    setOperationAction(ISD::MULHU,            MVT::i64, Custom);
122    setOperationAction(ISD::MUL,              MVT::i64, Custom);
123  }
124
125  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
126  setOperationAction(ISD::INTRINSIC_W_CHAIN,  MVT::i64, Custom);
127
128  setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
129  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
130  setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
131  setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
132  setOperationAction(ISD::ATOMIC_FENCE,       MVT::Other, Custom);
133  setOperationAction(ISD::LOAD,               MVT::i32, Custom);
134  setOperationAction(ISD::STORE,              MVT::i32, Custom);
135
136  setTargetDAGCombine(ISD::ADDE);
137  setTargetDAGCombine(ISD::SUBE);
138  setTargetDAGCombine(ISD::MUL);
139
140  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
141  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
142  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
143
144  if (NoDPLoadStore) {
145    setOperationAction(ISD::LOAD, MVT::f64, Custom);
146    setOperationAction(ISD::STORE, MVT::f64, Custom);
147  }
148
149  computeRegisterProperties();
150}
151
152const MipsTargetLowering *
153llvm::createMipsSETargetLowering(MipsTargetMachine &TM) {
154  return new MipsSETargetLowering(TM);
155}
156
157// Enable MSA support for the given integer type and Register class.
158void MipsSETargetLowering::
159addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
160  addRegisterClass(Ty, RC);
161
162  // Expand all builtin opcodes.
163  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
164    setOperationAction(Opc, Ty, Expand);
165
166  setOperationAction(ISD::BITCAST, Ty, Legal);
167  setOperationAction(ISD::LOAD, Ty, Legal);
168  setOperationAction(ISD::STORE, Ty, Legal);
169  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Custom);
170  setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
171  setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);
172
173  setOperationAction(ISD::ADD, Ty, Legal);
174  setOperationAction(ISD::AND, Ty, Legal);
175  setOperationAction(ISD::CTLZ, Ty, Legal);
176  setOperationAction(ISD::CTPOP, Ty, Legal);
177  setOperationAction(ISD::MUL, Ty, Legal);
178  setOperationAction(ISD::OR, Ty, Legal);
179  setOperationAction(ISD::SDIV, Ty, Legal);
180  setOperationAction(ISD::SHL, Ty, Legal);
181  setOperationAction(ISD::SRA, Ty, Legal);
182  setOperationAction(ISD::SRL, Ty, Legal);
183  setOperationAction(ISD::SUB, Ty, Legal);
184  setOperationAction(ISD::UDIV, Ty, Legal);
185  setOperationAction(ISD::VECTOR_SHUFFLE, Ty, Custom);
186  setOperationAction(ISD::VSELECT, Ty, Legal);
187  setOperationAction(ISD::XOR, Ty, Legal);
188
189  setOperationAction(ISD::SETCC, Ty, Legal);
190  setCondCodeAction(ISD::SETNE, Ty, Expand);
191  setCondCodeAction(ISD::SETGE, Ty, Expand);
192  setCondCodeAction(ISD::SETGT, Ty, Expand);
193  setCondCodeAction(ISD::SETUGE, Ty, Expand);
194  setCondCodeAction(ISD::SETUGT, Ty, Expand);
195}
196
197// Enable MSA support for the given floating-point type and Register class.
198void MipsSETargetLowering::
199addMSAFloatType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
200  addRegisterClass(Ty, RC);
201
202  // Expand all builtin opcodes.
203  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
204    setOperationAction(Opc, Ty, Expand);
205
206  setOperationAction(ISD::LOAD, Ty, Legal);
207  setOperationAction(ISD::STORE, Ty, Legal);
208  setOperationAction(ISD::BITCAST, Ty, Legal);
209  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Legal);
210
211  if (Ty != MVT::v8f16) {
212    setOperationAction(ISD::FABS,  Ty, Legal);
213    setOperationAction(ISD::FADD,  Ty, Legal);
214    setOperationAction(ISD::FDIV,  Ty, Legal);
215    setOperationAction(ISD::FLOG2, Ty, Legal);
216    setOperationAction(ISD::FMUL,  Ty, Legal);
217    setOperationAction(ISD::FRINT, Ty, Legal);
218    setOperationAction(ISD::FSQRT, Ty, Legal);
219    setOperationAction(ISD::FSUB,  Ty, Legal);
220    setOperationAction(ISD::VSELECT, Ty, Legal);
221
222    setOperationAction(ISD::SETCC, Ty, Legal);
223    setCondCodeAction(ISD::SETOGE, Ty, Expand);
224    setCondCodeAction(ISD::SETOGT, Ty, Expand);
225    setCondCodeAction(ISD::SETUGE, Ty, Expand);
226    setCondCodeAction(ISD::SETUGT, Ty, Expand);
227    setCondCodeAction(ISD::SETGE,  Ty, Expand);
228    setCondCodeAction(ISD::SETGT,  Ty, Expand);
229  }
230}
231
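// Report unaligned i32 and i64 accesses as supported and fast; all other
// types are reported as unsupported.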
232bool
233MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
234  MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
235
236  switch (SVT) {
237  case MVT::i64:
238  case MVT::i32:
239    if (Fast)
240      *Fast = true;
241    return true;
242  default:
243    return false;
244  }
245}
246
247SDValue MipsSETargetLowering::LowerOperation(SDValue Op,
248                                             SelectionDAG &DAG) const {
249  switch(Op.getOpcode()) {
250  case ISD::LOAD:  return lowerLOAD(Op, DAG);
251  case ISD::STORE: return lowerSTORE(Op, DAG);
252  case ISD::SMUL_LOHI: return lowerMulDiv(Op, MipsISD::Mult, true, true, DAG);
253  case ISD::UMUL_LOHI: return lowerMulDiv(Op, MipsISD::Multu, true, true, DAG);
254  case ISD::MULHS:     return lowerMulDiv(Op, MipsISD::Mult, false, true, DAG);
255  case ISD::MULHU:     return lowerMulDiv(Op, MipsISD::Multu, false, true, DAG);
256  case ISD::MUL:       return lowerMulDiv(Op, MipsISD::Mult, true, false, DAG);
257  case ISD::SDIVREM:   return lowerMulDiv(Op, MipsISD::DivRem, true, true, DAG);
258  case ISD::UDIVREM:   return lowerMulDiv(Op, MipsISD::DivRemU, true, true,
259                                          DAG);
260  case ISD::INTRINSIC_WO_CHAIN: return lowerINTRINSIC_WO_CHAIN(Op, DAG);
261  case ISD::INTRINSIC_W_CHAIN:  return lowerINTRINSIC_W_CHAIN(Op, DAG);
262  case ISD::INTRINSIC_VOID:     return lowerINTRINSIC_VOID(Op, DAG);
263  case ISD::EXTRACT_VECTOR_ELT: return lowerEXTRACT_VECTOR_ELT(Op, DAG);
264  case ISD::BUILD_VECTOR:       return lowerBUILD_VECTOR(Op, DAG);
265  case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, DAG);
266  }
267
268  return MipsTargetLowering::LowerOperation(Op, DAG);
269}
270
271// selectMADD -
272// Transforms a subgraph in CurDAG if the following pattern is found:
273//  (addc multLo, Lo0), (adde multHi, Hi0),
274// where,
275//  multHi/Lo: product of multiplication
276//  Lo0: initial value of Lo register
277//  Hi0: initial value of Hi register
278// Return true if pattern matching was successful.
279static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) {
280  // ADDENode's second operand must be a flag output of an ADDC node in order
281  // for the matching to be successful.
282  SDNode *ADDCNode = ADDENode->getOperand(2).getNode();
283
284  if (ADDCNode->getOpcode() != ISD::ADDC)
285    return false;
286
287  SDValue MultHi = ADDENode->getOperand(0);
288  SDValue MultLo = ADDCNode->getOperand(0);
289  SDNode *MultNode = MultHi.getNode();
290  unsigned MultOpc = MultHi.getOpcode();
291
292  // MultHi and MultLo must be generated by the same node,
293  if (MultLo.getNode() != MultNode)
294    return false;
295
296  // and it must be a multiplication.
297  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
298    return false;
299
300  // MultLo and MultHi must be the first and second output of MultNode
301  // respectively.
302  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
303    return false;
304
305  // Transform this to a MADD only if ADDENode and ADDCNode are the only users
306  // of the values of MultNode, in which case MultNode will be removed in later
307  // phases.
308  // If there exist users other than ADDENode or ADDCNode, this function returns
309  // here, which will result in MultNode being mapped to a single MULT
310  // instruction node rather than a pair of MULT and MADD instructions being
311  // produced.
312  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
313    return false;
314
315  SDLoc DL(ADDENode);
316
317  // Initialize accumulator.
318  SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
319                                  ADDCNode->getOperand(1),
320                                  ADDENode->getOperand(1));
321
322  // create MipsMAdd(u) node
323  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MAddu : MipsISD::MAdd;
324
325  SDValue MAdd = CurDAG->getNode(MultOpc, DL, MVT::Untyped,
326                                 MultNode->getOperand(0),// Factor 0
327                                 MultNode->getOperand(1),// Factor 1
328                                 ACCIn);
329
330  // replace uses of adde and addc here
331  if (!SDValue(ADDCNode, 0).use_empty()) {
332    SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32);
333    SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd,
334                                    LoIdx);
335    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), LoOut);
336  }
337  if (!SDValue(ADDENode, 0).use_empty()) {
338    SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32);
339    SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd,
340                                    HiIdx);
341    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), HiOut);
342  }
343
344  return true;
345}
346
347// selectMSUB -
348// Transforms a subgraph in CurDAG if the following pattern is found:
349//  (subc Lo0, multLo), (sube Hi0, multHi),
350// where,
351//  multHi/Lo: product of multiplication
352//  Lo0: initial value of Lo register
353//  Hi0: initial value of Hi register
354// Return true if pattern matching was successful.
355static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) {
356  // SUBENode's second operand must be a flag output of a SUBC node in order
357  // for the matching to be successful.
358  SDNode *SUBCNode = SUBENode->getOperand(2).getNode();
359
360  if (SUBCNode->getOpcode() != ISD::SUBC)
361    return false;
362
363  SDValue MultHi = SUBENode->getOperand(1);
364  SDValue MultLo = SUBCNode->getOperand(1);
365  SDNode *MultNode = MultHi.getNode();
366  unsigned MultOpc = MultHi.getOpcode();
367
368  // MultHi and MultLo must be generated by the same node,
369  if (MultLo.getNode() != MultNode)
370    return false;
371
372  // and it must be a multiplication.
373  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
374    return false;
375
376  // MultLo and MultHi must be the first and second output of MultNode
377  // respectively.
378  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
379    return false;
380
381  // Transform this to a MSUB only if SUBENode and SUBCNode are the only users
382  // of the values of MultNode, in which case MultNode will be removed in later
383  // phases.
384  // If there exist users other than SUBENode or SUBCNode, this function returns
385  // here, which will result in MultNode being mapped to a single MULT
386  // instruction node rather than a pair of MULT and MSUB instructions being
387  // produced.
388  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
389    return false;
390
391  SDLoc DL(SUBENode);
392
393  // Initialize accumulator.
394  SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
395                                  SUBCNode->getOperand(0),
396                                  SUBENode->getOperand(0));
397
398  // create MipsMSub(u) node
399  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MSubu : MipsISD::MSub;
400
401  SDValue MSub = CurDAG->getNode(MultOpc, DL, MVT::Untyped,
402                                 MultNode->getOperand(0),// Factor 0
403                                 MultNode->getOperand(1),// Factor 1
404                                 ACCIn);
405
406  // replace uses of sube and subc here
407  if (!SDValue(SUBCNode, 0).use_empty()) {
408    SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32);
409    SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub,
410                                    LoIdx);
411    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), LoOut);
412  }
413  if (!SDValue(SUBENode, 0).use_empty()) {
414    SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32);
415    SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub,
416                                    HiIdx);
417    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), HiOut);
418  }
419
420  return true;
421}
422
423static SDValue performADDECombine(SDNode *N, SelectionDAG &DAG,
424                                  TargetLowering::DAGCombinerInfo &DCI,
425                                  const MipsSubtarget *Subtarget) {
426  if (DCI.isBeforeLegalize())
427    return SDValue();
428
429  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
430      selectMADD(N, &DAG))
431    return SDValue(N, 0);
432
433  return SDValue();
434}
435
436// Fold zero extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT
437//
438// Performs the following transformations:
439// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to zero extension if its
440//   sign/zero-extension is completely overwritten by the new one performed by
441//   the ISD::AND.
442// - Removes redundant zero extensions performed by an ISD::AND.
443static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
444                                 TargetLowering::DAGCombinerInfo &DCI,
445                                 const MipsSubtarget *Subtarget) {
446  if (!Subtarget->hasMSA())
447    return SDValue();
448
449  SDValue Op0 = N->getOperand(0);
450  SDValue Op1 = N->getOperand(1);
451  unsigned Op0Opcode = Op0->getOpcode();
452
453  // (and (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d)
454  // where $d + 1 == 2^n and n == sizeof($c)
455  // or    $d + 1 == 2^n and n >= sizeof($c) and ZExt
456  // -> (MipsVExtractZExt $a, $b, $c)
457  if (Op0Opcode == MipsISD::VEXTRACT_SEXT_ELT ||
458      Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT) {
459    ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(Op1);
460
461    if (!Mask)
462      return SDValue();
463
464    int32_t Log2IfPositive = (Mask->getAPIntValue() + 1).exactLogBase2();
465
466    if (Log2IfPositive <= 0)
467      return SDValue(); // Mask+1 is not a power of 2
468
469    SDValue Op0Op2 = Op0->getOperand(2);
470    EVT ExtendTy = cast<VTSDNode>(Op0Op2)->getVT();
471    unsigned ExtendTySize = ExtendTy.getSizeInBits();
472    unsigned Log2 = Log2IfPositive;
473
474    if ((Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT && Log2 >= ExtendTySize) ||
475        Log2 == ExtendTySize) {
476      SDValue Ops[] = { Op0->getOperand(0), Op0->getOperand(1), Op0Op2 };
477      DAG.MorphNodeTo(Op0.getNode(), MipsISD::VEXTRACT_ZEXT_ELT,
478                      Op0->getVTList(), Ops, Op0->getNumOperands());
479      return Op0;
480    }
481  }
482
483  return SDValue();
484}
485
486static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG,
487                                  TargetLowering::DAGCombinerInfo &DCI,
488                                  const MipsSubtarget *Subtarget) {
489  if (DCI.isBeforeLegalize())
490    return SDValue();
491
492  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
493      selectMSUB(N, &DAG))
494    return SDValue(N, 0);
495
496  return SDValue();
497}
498
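// Build a DAG computing (X * C) using only shifts, adds and subs by
// recursively splitting C around its nearest powers of two. For example,
// C == 6 gives Floor == 4 and Ceil == 8; since 6 - 4 <= 8 - 6, the result is
// (add (shl X, 2), (shl X, 1)).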
499static SDValue genConstMult(SDValue X, uint64_t C, SDLoc DL, EVT VT,
500                            EVT ShiftTy, SelectionDAG &DAG) {
501  // Clear the upper (64 - VT.sizeInBits) bits.
502  C &= ((uint64_t)-1) >> (64 - VT.getSizeInBits());
503
504  // Return 0.
505  if (C == 0)
506    return DAG.getConstant(0, VT);
507
508  // Return x.
509  if (C == 1)
510    return X;
511
512  // If c is power of 2, return (shl x, log2(c)).
513  if (isPowerOf2_64(C))
514    return DAG.getNode(ISD::SHL, DL, VT, X,
515                       DAG.getConstant(Log2_64(C), ShiftTy));
516
517  unsigned Log2Ceil = Log2_64_Ceil(C);
518  uint64_t Floor = 1LL << Log2_64(C);
519  uint64_t Ceil = Log2Ceil == 64 ? 0LL : 1LL << Log2Ceil;
520
521  // If |c - floor_c| <= |c - ceil_c|,
522  // where floor_c = pow(2, floor(log2(c))) and ceil_c = pow(2, ceil(log2(c))),
523  // return (add constMult(x, floor_c), constMult(x, c - floor_c)).
524  if (C - Floor <= Ceil - C) {
525    SDValue Op0 = genConstMult(X, Floor, DL, VT, ShiftTy, DAG);
526    SDValue Op1 = genConstMult(X, C - Floor, DL, VT, ShiftTy, DAG);
527    return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
528  }
529
530  // If |c - floor_c| > |c - ceil_c|,
531  // return (sub constMult(x, ceil_c), constMult(x, ceil_c - c)).
532  SDValue Op0 = genConstMult(X, Ceil, DL, VT, ShiftTy, DAG);
533  SDValue Op1 = genConstMult(X, Ceil - C, DL, VT, ShiftTy, DAG);
534  return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
535}
536
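// Replace a multiplication by a scalar constant with the equivalent
// shift/add/sub sequence built by genConstMult.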
537static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG,
538                                 const TargetLowering::DAGCombinerInfo &DCI,
539                                 const MipsSETargetLowering *TL) {
540  EVT VT = N->getValueType(0);
541
542  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
543    if (!VT.isVector())
544      return genConstMult(N->getOperand(0), C->getZExtValue(), SDLoc(N),
545                          VT, TL->getScalarShiftAmountTy(VT), DAG);
546
547  return SDValue(N, 0);
548}
549
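// Fold a DSP vector shift whose amount is a constant splat that fits in the
// element size into the corresponding SHLL/SHRA/SHRL_DSP node.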
550static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
551                                      SelectionDAG &DAG,
552                                      const MipsSubtarget *Subtarget) {
553  // See if this is a vector splat immediate node.
554  APInt SplatValue, SplatUndef;
555  unsigned SplatBitSize;
556  bool HasAnyUndefs;
557  unsigned EltSize = Ty.getVectorElementType().getSizeInBits();
558  BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
559
560  if (!BV ||
561      !BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
562                           EltSize, !Subtarget->isLittle()) ||
563      (SplatBitSize != EltSize) ||
564      (SplatValue.getZExtValue() >= EltSize))
565    return SDValue();
566
567  return DAG.getNode(Opc, SDLoc(N), Ty, N->getOperand(0),
568                     DAG.getConstant(SplatValue.getZExtValue(), MVT::i32));
569}
570
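// Fold constant splat shift amounts into MipsISD::SHLL_DSP for the DSP
// vector types (v2i16 and v4i8).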
571static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
572                                 TargetLowering::DAGCombinerInfo &DCI,
573                                 const MipsSubtarget *Subtarget) {
574  EVT Ty = N->getValueType(0);
575
576  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
577    return SDValue();
578
579  return performDSPShiftCombine(MipsISD::SHLL_DSP, N, Ty, DAG, Subtarget);
580}
581
582// Fold sign-extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT for MSA and fold
583// constant splats into MipsISD::SHRA_DSP for DSPr2.
584//
585// Performs the following transformations:
586// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to sign extension if its
587//   sign/zero-extension is completely overwritten by the new one performed by
588//   the ISD::SRA and ISD::SHL nodes.
589// - Removes redundant sign extensions performed by an ISD::SRA and ISD::SHL
590//   sequence.
591//
592// See performDSPShiftCombine for more information about the transformation
593// used for DSPr2.
594static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
595                                 TargetLowering::DAGCombinerInfo &DCI,
596                                 const MipsSubtarget *Subtarget) {
597  EVT Ty = N->getValueType(0);
598
599  if (Subtarget->hasMSA()) {
600    SDValue Op0 = N->getOperand(0);
601    SDValue Op1 = N->getOperand(1);
602
603    // (sra (shl (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d), imm:$d)
604    // where $d + sizeof($c) == 32
605    // or    $d + sizeof($c) <= 32 and SExt
606    // -> (MipsVExtractSExt $a, $b, $c)
607    if (Op0->getOpcode() == ISD::SHL && Op1 == Op0->getOperand(1)) {
608      SDValue Op0Op0 = Op0->getOperand(0);
609      ConstantSDNode *ShAmount = dyn_cast<ConstantSDNode>(Op1);
610
611      if (!ShAmount)
612        return SDValue();
613
614      if (Op0Op0->getOpcode() != MipsISD::VEXTRACT_SEXT_ELT &&
615          Op0Op0->getOpcode() != MipsISD::VEXTRACT_ZEXT_ELT)
616        return SDValue();
617
618      EVT ExtendTy = cast<VTSDNode>(Op0Op0->getOperand(2))->getVT();
619      unsigned TotalBits = ShAmount->getZExtValue() + ExtendTy.getSizeInBits();
620
621      if (TotalBits == 32 ||
622          (Op0Op0->getOpcode() == MipsISD::VEXTRACT_SEXT_ELT &&
623           TotalBits <= 32)) {
624        SDValue Ops[] = { Op0Op0->getOperand(0), Op0Op0->getOperand(1),
625                          Op0Op0->getOperand(2) };
626        DAG.MorphNodeTo(Op0Op0.getNode(), MipsISD::VEXTRACT_SEXT_ELT,
627                        Op0Op0->getVTList(), Ops, Op0Op0->getNumOperands());
628        return Op0Op0;
629      }
630    }
631  }
632
633  if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget->hasDSPR2()))
634    return SDValue();
635
636  return performDSPShiftCombine(MipsISD::SHRA_DSP, N, Ty, DAG, Subtarget);
637}
638
639
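// Fold constant splat shift amounts into MipsISD::SHRL_DSP for v4i8, or for
// v2i16 when DSPr2 is available.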
640static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG,
641                                 TargetLowering::DAGCombinerInfo &DCI,
642                                 const MipsSubtarget *Subtarget) {
643  EVT Ty = N->getValueType(0);
644
645  if (((Ty != MVT::v2i16) || !Subtarget->hasDSPR2()) && (Ty != MVT::v4i8))
646    return SDValue();
647
648  return performDSPShiftCombine(MipsISD::SHRL_DSP, N, Ty, DAG, Subtarget);
649}
650
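// Return true if the DSP compare instructions can implement the given
// condition code for the given type: both types handle SETEQ/SETNE, v2i16
// handles the signed orderings and v4i8 the unsigned ones.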
651static bool isLegalDSPCondCode(EVT Ty, ISD::CondCode CC) {
652  bool IsV216 = (Ty == MVT::v2i16);
653
654  switch (CC) {
655  case ISD::SETEQ:
656  case ISD::SETNE:  return true;
657  case ISD::SETLT:
658  case ISD::SETLE:
659  case ISD::SETGT:
660  case ISD::SETGE:  return IsV216;
661  case ISD::SETULT:
662  case ISD::SETULE:
663  case ISD::SETUGT:
664  case ISD::SETUGE: return !IsV216;
665  default:          return false;
666  }
667}
668
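// Replace a DSP vector setcc with MipsISD::SETCC_DSP when the condition code
// is one the DSP compare instructions support.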
669static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
670  EVT Ty = N->getValueType(0);
671
672  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
673    return SDValue();
674
675  if (!isLegalDSPCondCode(Ty, cast<CondCodeSDNode>(N->getOperand(2))->get()))
676    return SDValue();
677
678  return DAG.getNode(MipsISD::SETCC_DSP, SDLoc(N), Ty, N->getOperand(0),
679                     N->getOperand(1), N->getOperand(2));
680}
681
682static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) {
683  EVT Ty = N->getValueType(0);
684
685  if (Ty.is128BitVector() && Ty.isInteger()) {
686    // Try the following combines:
687    //   (vselect (setcc $a, $b, SETLT), $b, $a) -> (vsmax $a, $b)
688    //   (vselect (setcc $a, $b, SETLE), $b, $a) -> (vsmax $a, $b)
689    //   (vselect (setcc $a, $b, SETLT), $a, $b) -> (vsmin $a, $b)
690    //   (vselect (setcc $a, $b, SETLE), $a, $b) -> (vsmin $a, $b)
691    //   (vselect (setcc $a, $b, SETULT), $b, $a) -> (vumax $a, $b)
692    //   (vselect (setcc $a, $b, SETULE), $b, $a) -> (vumax $a, $b)
693    //   (vselect (setcc $a, $b, SETULT), $a, $b) -> (vumin $a, $b)
694    //   (vselect (setcc $a, $b, SETULE), $a, $b) -> (vumin $a, $b)
695    // SETGT/SETGE/SETUGT/SETUGE variants of these will show up initially but
696    // will be expanded to equivalent SETLT/SETLE/SETULT/SETULE versions by the
697    // legalizer.
698    SDValue Op0 = N->getOperand(0);
699
700    if (Op0->getOpcode() != ISD::SETCC)
701      return SDValue();
702
703    ISD::CondCode CondCode = cast<CondCodeSDNode>(Op0->getOperand(2))->get();
704    bool Signed;
705
706    if (CondCode == ISD::SETLT  || CondCode == ISD::SETLE)
707      Signed = true;
708    else if (CondCode == ISD::SETULT || CondCode == ISD::SETULE)
709      Signed = false;
710    else
711      return SDValue();
712
713    SDValue Op1 = N->getOperand(1);
714    SDValue Op2 = N->getOperand(2);
715    SDValue Op0Op0 = Op0->getOperand(0);
716    SDValue Op0Op1 = Op0->getOperand(1);
717
718    if (Op1 == Op0Op0 && Op2 == Op0Op1)
719      return DAG.getNode(Signed ? MipsISD::VSMIN : MipsISD::VUMIN, SDLoc(N),
720                         Ty, Op1, Op2);
721    else if (Op1 == Op0Op1 && Op2 == Op0Op0)
722      return DAG.getNode(Signed ? MipsISD::VSMAX : MipsISD::VUMAX, SDLoc(N),
723                         Ty, Op1, Op2);
724  } else if ((Ty == MVT::v2i16) || (Ty == MVT::v4i8)) {
725    SDValue SetCC = N->getOperand(0);
726
727    if (SetCC.getOpcode() != MipsISD::SETCC_DSP)
728      return SDValue();
729
730    return DAG.getNode(MipsISD::SELECT_CC_DSP, SDLoc(N), Ty,
731                       SetCC.getOperand(0), SetCC.getOperand(1),
732                       N->getOperand(1), N->getOperand(2), SetCC.getOperand(2));
733  }
734
735  return SDValue();
736}
737
738static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
739                                 const MipsSubtarget *Subtarget) {
740  EVT Ty = N->getValueType(0);
741
742  if (Subtarget->hasMSA() && Ty.is128BitVector() && Ty.isInteger()) {
743    // Try the following combines:
744    //   (xor (or $a, $b), (build_vector allones))
745    //   (xor (or $a, $b), (bitcast (build_vector allones)))
746    SDValue Op0 = N->getOperand(0);
747    SDValue Op1 = N->getOperand(1);
748    SDValue NotOp;
749
750    if (ISD::isBuildVectorAllOnes(Op0.getNode()))
751      NotOp = Op1;
752    else if (ISD::isBuildVectorAllOnes(Op1.getNode()))
753      NotOp = Op0;
754    else
755      return SDValue();
756
757    if (NotOp->getOpcode() == ISD::OR)
758      return DAG.getNode(MipsISD::VNOR, SDLoc(N), Ty, NotOp->getOperand(0),
759                         NotOp->getOperand(1));
760  }
761
762  return SDValue();
763}
764
765SDValue
766MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
767  SelectionDAG &DAG = DCI.DAG;
768  SDValue Val;
769
770  switch (N->getOpcode()) {
771  case ISD::ADDE:
772    return performADDECombine(N, DAG, DCI, Subtarget);
773  case ISD::AND:
774    Val = performANDCombine(N, DAG, DCI, Subtarget);
775    break;
776  case ISD::SUBE:
777    return performSUBECombine(N, DAG, DCI, Subtarget);
778  case ISD::MUL:
779    return performMULCombine(N, DAG, DCI, this);
780  case ISD::SHL:
781    return performSHLCombine(N, DAG, DCI, Subtarget);
782  case ISD::SRA:
783    return performSRACombine(N, DAG, DCI, Subtarget);
784  case ISD::SRL:
785    return performSRLCombine(N, DAG, DCI, Subtarget);
786  case ISD::VSELECT:
787    return performVSELECTCombine(N, DAG);
788  case ISD::XOR:
789    Val = performXORCombine(N, DAG, Subtarget);
790    break;
791  case ISD::SETCC:
792    Val = performSETCCCombine(N, DAG);
793    break;
794  }
795
796  if (Val.getNode())
797    return Val;
798
799  return MipsTargetLowering::PerformDAGCombine(N, DCI);
800}
801
802MachineBasicBlock *
803MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
804                                                  MachineBasicBlock *BB) const {
805  switch (MI->getOpcode()) {
806  default:
807    return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
808  case Mips::BPOSGE32_PSEUDO:
809    return emitBPOSGE32(MI, BB);
810  case Mips::SNZ_B_PSEUDO:
811    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_B);
812  case Mips::SNZ_H_PSEUDO:
813    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_H);
814  case Mips::SNZ_W_PSEUDO:
815    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_W);
816  case Mips::SNZ_D_PSEUDO:
817    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_D);
818  case Mips::SNZ_V_PSEUDO:
819    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_V);
820  case Mips::SZ_B_PSEUDO:
821    return emitMSACBranchPseudo(MI, BB, Mips::BZ_B);
822  case Mips::SZ_H_PSEUDO:
823    return emitMSACBranchPseudo(MI, BB, Mips::BZ_H);
824  case Mips::SZ_W_PSEUDO:
825    return emitMSACBranchPseudo(MI, BB, Mips::BZ_W);
826  case Mips::SZ_D_PSEUDO:
827    return emitMSACBranchPseudo(MI, BB, Mips::BZ_D);
828  case Mips::SZ_V_PSEUDO:
829    return emitMSACBranchPseudo(MI, BB, Mips::BZ_V);
830  }
831}
832
833bool MipsSETargetLowering::
834isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
835                                  unsigned NextStackOffset,
836                                  const MipsFunctionInfo& FI) const {
837  if (!EnableMipsTailCalls)
838    return false;
839
840  // Return false if either the callee or caller has a byval argument.
841  if (MipsCCInfo.hasByValArg() || FI.hasByvalArg())
842    return false;
843
844  // Return true if the callee's argument area is no larger than the
845  // caller's.
846  return NextStackOffset <= FI.getIncomingArgSize();
847}
848
849void MipsSETargetLowering::
850getOpndList(SmallVectorImpl<SDValue> &Ops,
851            std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
852            bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
853            CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const {
854  // T9 should contain the address of the callee function if
855  // -relocation-model=pic or it is an indirect call.
856  if (IsPICCall || !GlobalOrExternal) {
857    unsigned T9Reg = IsN64 ? Mips::T9_64 : Mips::T9;
858    RegsToPass.push_front(std::make_pair(T9Reg, Callee));
859  } else
860    Ops.push_back(Callee);
861
862  MipsTargetLowering::getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal,
863                                  InternalLinkage, CLI, Callee, Chain);
864}
865
866SDValue MipsSETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
867  LoadSDNode &Nd = *cast<LoadSDNode>(Op);
868
869  if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
870    return MipsTargetLowering::lowerLOAD(Op, DAG);
871
872  // Replace a double precision load with two i32 loads and a BuildPairF64.
873  SDLoc DL(Op);
874  SDValue Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
875  EVT PtrVT = Ptr.getValueType();
876
877  // i32 load from lower address.
878  SDValue Lo = DAG.getLoad(MVT::i32, DL, Chain, Ptr,
879                           MachinePointerInfo(), Nd.isVolatile(),
880                           Nd.isNonTemporal(), Nd.isInvariant(),
881                           Nd.getAlignment());
882
883  // i32 load from higher address.
884  Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
885  SDValue Hi = DAG.getLoad(MVT::i32, DL, Lo.getValue(1), Ptr,
886                           MachinePointerInfo(), Nd.isVolatile(),
887                           Nd.isNonTemporal(), Nd.isInvariant(),
888                           std::min(Nd.getAlignment(), 4U));
889
890  if (!Subtarget->isLittle())
891    std::swap(Lo, Hi);
892
893  SDValue BP = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
894  SDValue Ops[2] = {BP, Hi.getValue(1)};
895  return DAG.getMergeValues(Ops, 2, DL);
896}
897
898SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
899  StoreSDNode &Nd = *cast<StoreSDNode>(Op);
900
901  if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
902    return MipsTargetLowering::lowerSTORE(Op, DAG);
903
904  // Replace a double precision store with two ExtractElementF64s and two i32 stores.
905  SDLoc DL(Op);
906  SDValue Val = Nd.getValue(), Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
907  EVT PtrVT = Ptr.getValueType();
908  SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
909                           Val, DAG.getConstant(0, MVT::i32));
910  SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
911                           Val, DAG.getConstant(1, MVT::i32));
912
913  if (!Subtarget->isLittle())
914    std::swap(Lo, Hi);
915
916  // i32 store to lower address.
917  Chain = DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(),
918                       Nd.isVolatile(), Nd.isNonTemporal(), Nd.getAlignment(),
919                       Nd.getTBAAInfo());
920
921  // i32 store to higher address.
922  Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
923  return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(),
924                      Nd.isVolatile(), Nd.isNonTemporal(),
925                      std::min(Nd.getAlignment(), 4U), Nd.getTBAAInfo());
926}
927
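// Lower a multiply or divide to a node producing an untyped {LO, HI}
// accumulator, then extract only the halves the caller asked for.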
928SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
929                                          bool HasLo, bool HasHi,
930                                          SelectionDAG &DAG) const {
931  EVT Ty = Op.getOperand(0).getValueType();
932  SDLoc DL(Op);
933  SDValue Mult = DAG.getNode(NewOpc, DL, MVT::Untyped,
934                             Op.getOperand(0), Op.getOperand(1));
935  SDValue Lo, Hi;
936
937  if (HasLo)
938    Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult,
939                     DAG.getConstant(Mips::sub_lo, MVT::i32));
940  if (HasHi)
941    Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult,
942                     DAG.getConstant(Mips::sub_hi, MVT::i32));
943
944  if (!HasLo || !HasHi)
945    return HasLo ? Lo : Hi;
946
947  SDValue Vals[] = { Lo, Hi };
948  return DAG.getMergeValues(Vals, 2, DL);
949}
950
951
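// Build an untyped {LO, HI} accumulator value from the two 32-bit halves of
// a 64-bit integer.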
952static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) {
953  SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
954                             DAG.getConstant(0, MVT::i32));
955  SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
956                             DAG.getConstant(1, MVT::i32));
957  return DAG.getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, InLo, InHi);
958}
959
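// Extract the LO and HI parts of an untyped accumulator and recombine them
// into an i64.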
960static SDValue extractLOHI(SDValue Op, SDLoc DL, SelectionDAG &DAG) {
961  SDValue Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op,
962                           DAG.getConstant(Mips::sub_lo, MVT::i32));
963  SDValue Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op,
964                           DAG.getConstant(Mips::sub_hi, MVT::i32));
965  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
966}
967
968// This function expands mips intrinsic nodes which have 64-bit input operands
969// or output values.
970//
971// out64 = intrinsic-node in64
972// =>
973// lo = copy (extract-element (in64, 0))
974// hi = copy (extract-element (in64, 1))
975// mips-specific-node
976// v0 = copy lo
977// v1 = copy hi
978// out64 = merge-values (v0, v1)
979//
980static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
981  SDLoc DL(Op);
982  bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other;
983  SmallVector<SDValue, 3> Ops;
984  unsigned OpNo = 0;
985
986  // See if Op has a chain input.
987  if (HasChainIn)
988    Ops.push_back(Op->getOperand(OpNo++));
989
990  // The next operand is the intrinsic opcode.
991  assert(Op->getOperand(OpNo).getOpcode() == ISD::TargetConstant);
992
993  // See if the next operand has type i64.
994  SDValue Opnd = Op->getOperand(++OpNo), In64;
995
996  if (Opnd.getValueType() == MVT::i64)
997    In64 = initAccumulator(Opnd, DL, DAG);
998  else
999    Ops.push_back(Opnd);
1000
1001  // Push the remaining operands.
1002  for (++OpNo ; OpNo < Op->getNumOperands(); ++OpNo)
1003    Ops.push_back(Op->getOperand(OpNo));
1004
1005  // Add In64 to the end of the list.
1006  if (In64.getNode())
1007    Ops.push_back(In64);
1008
1009  // Scan output.
1010  SmallVector<EVT, 2> ResTys;
1011
1012  for (SDNode::value_iterator I = Op->value_begin(), E = Op->value_end();
1013       I != E; ++I)
1014    ResTys.push_back((*I == MVT::i64) ? MVT::Untyped : *I);
1015
1016  // Create node.
1017  SDValue Val = DAG.getNode(Opc, DL, ResTys, &Ops[0], Ops.size());
1018  SDValue Out = (ResTys[0] == MVT::Untyped) ? extractLOHI(Val, DL, DAG) : Val;
1019
1020  if (!HasChainIn)
1021    return Out;
1022
1023  assert(Val->getValueType(1) == MVT::Other);
1024  SDValue Vals[] = { Out, SDValue(Val.getNode(), 1) };
1025  return DAG.getMergeValues(Vals, 2, DL);
1026}
1027
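// Lower an MSA intrinsic with two vector operands into the specified
// SelectionDAG node.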
1028static SDValue lowerMSABinaryIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1029  SDLoc DL(Op);
1030  SDValue LHS = Op->getOperand(1);
1031  SDValue RHS = Op->getOperand(2);
1032  EVT ResTy = Op->getValueType(0);
1033
1034  SDValue Result = DAG.getNode(Opc, DL, ResTy, LHS, RHS);
1035
1036  return Result;
1037}
1038
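// Lower an MSA intrinsic that takes an immediate into the specified
// SelectionDAG node, with RHS supplying the already-splatted immediate.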
1039static SDValue lowerMSABinaryImmIntr(SDValue Op, SelectionDAG &DAG,
1040                                     unsigned Opc, SDValue RHS) {
1041  SDValue LHS = Op->getOperand(1);
1042  EVT ResTy = Op->getValueType(0);
1043
1044  return DAG.getNode(Opc, SDLoc(Op), ResTy, LHS, RHS);
1045}
1046
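// Lower an MSA branch intrinsic (bz/bnz) into the specified all/any
// (non)zero test node.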
1047static SDValue lowerMSABranchIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1048  SDLoc DL(Op);
1049  SDValue Value = Op->getOperand(1);
1050  EVT ResTy = Op->getValueType(0);
1051
1052  SDValue Result = DAG.getNode(Opc, DL, ResTy, Value);
1053
1054  return Result;
1055}
1056
1057// Lower an MSA copy intrinsic into the specified SelectionDAG node
1058static SDValue lowerMSACopyIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1059  SDLoc DL(Op);
1060  SDValue Vec = Op->getOperand(1);
1061  SDValue Idx = Op->getOperand(2);
1062  EVT ResTy = Op->getValueType(0);
1063  EVT EltTy = Vec->getValueType(0).getVectorElementType();
1064
1065  SDValue Result = DAG.getNode(Opc, DL, ResTy, Vec, Idx,
1066                               DAG.getValueType(EltTy));
1067
1068  return Result;
1069}
1070
1071// Lower an MSA insert intrinsic into the specified SelectionDAG node
1072static SDValue lowerMSAInsertIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1073  SDLoc DL(Op);
1074  SDValue Op0 = Op->getOperand(1);
1075  SDValue Op1 = Op->getOperand(2);
1076  SDValue Op2 = Op->getOperand(3);
1077  EVT ResTy = Op->getValueType(0);
1078
1079  SDValue Result = DAG.getNode(Opc, DL, ResTy, Op0, Op2, Op1);
1080
1081  return Result;
1082}
1083
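// Splat an immediate operand across all elements of the result vector type.
// v2i64 splats are built as a v4i32 BUILD_VECTOR of sign-extended {hi, lo}
// word pairs and then bitcast to v2i64.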
1084static SDValue lowerMSASplatImm(SDValue Op, SDValue ImmOp, SelectionDAG &DAG) {
1085  EVT ResTy = Op->getValueType(0);
1086  EVT ViaVecTy = ResTy;
1087  SmallVector<SDValue, 16> Ops;
1088  SDValue ImmHiOp;
1089  SDLoc DL(Op);
1090
1091  if (ViaVecTy == MVT::v2i64) {
1092    ImmHiOp = DAG.getNode(ISD::SRA, DL, MVT::i32, ImmOp,
1093                          DAG.getConstant(31, MVT::i32));
1094    for (unsigned i = 0; i < ViaVecTy.getVectorNumElements(); ++i) {
1095      Ops.push_back(ImmHiOp);
1096      Ops.push_back(ImmOp);
1097    }
1098    ViaVecTy = MVT::v4i32;
1099  } else {
1100    for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i)
1101      Ops.push_back(ImmOp);
1102  }
1103
1104  SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, &Ops[0],
1105                               Ops.size());
1106
1107  if (ResTy != ViaVecTy)
1108    Result = DAG.getNode(ISD::BITCAST, DL, ResTy, Result);
1109
1110  return Result;
1111}
1112
1113static SDValue
1114lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG) {
1115  return lowerMSASplatImm(Op, Op->getOperand(ImmOp), DAG);
1116}
1117
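// Lower an MSA intrinsic with a single vector operand into the specified
// SelectionDAG node.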
1118static SDValue lowerMSAUnaryIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1119  SDLoc DL(Op);
1120  SDValue Value = Op->getOperand(1);
1121  EVT ResTy = Op->getValueType(0);
1122
1123  SDValue Result = DAG.getNode(Opc, DL, ResTy, Value);
1124
1125  return Result;
1126}
1127
1128SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
1129                                                      SelectionDAG &DAG) const {
1130  switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) {
1131  default:
1132    return SDValue();
1133  case Intrinsic::mips_shilo:
1134    return lowerDSPIntr(Op, DAG, MipsISD::SHILO);
1135  case Intrinsic::mips_dpau_h_qbl:
1136    return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBL);
1137  case Intrinsic::mips_dpau_h_qbr:
1138    return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBR);
1139  case Intrinsic::mips_dpsu_h_qbl:
1140    return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBL);
1141  case Intrinsic::mips_dpsu_h_qbr:
1142    return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBR);
1143  case Intrinsic::mips_dpa_w_ph:
1144    return lowerDSPIntr(Op, DAG, MipsISD::DPA_W_PH);
1145  case Intrinsic::mips_dps_w_ph:
1146    return lowerDSPIntr(Op, DAG, MipsISD::DPS_W_PH);
1147  case Intrinsic::mips_dpax_w_ph:
1148    return lowerDSPIntr(Op, DAG, MipsISD::DPAX_W_PH);
1149  case Intrinsic::mips_dpsx_w_ph:
1150    return lowerDSPIntr(Op, DAG, MipsISD::DPSX_W_PH);
1151  case Intrinsic::mips_mulsa_w_ph:
1152    return lowerDSPIntr(Op, DAG, MipsISD::MULSA_W_PH);
1153  case Intrinsic::mips_mult:
1154    return lowerDSPIntr(Op, DAG, MipsISD::Mult);
1155  case Intrinsic::mips_multu:
1156    return lowerDSPIntr(Op, DAG, MipsISD::Multu);
1157  case Intrinsic::mips_madd:
1158    return lowerDSPIntr(Op, DAG, MipsISD::MAdd);
1159  case Intrinsic::mips_maddu:
1160    return lowerDSPIntr(Op, DAG, MipsISD::MAddu);
1161  case Intrinsic::mips_msub:
1162    return lowerDSPIntr(Op, DAG, MipsISD::MSub);
1163  case Intrinsic::mips_msubu:
1164    return lowerDSPIntr(Op, DAG, MipsISD::MSubu);
1165  case Intrinsic::mips_addv_b:
1166  case Intrinsic::mips_addv_h:
1167  case Intrinsic::mips_addv_w:
1168  case Intrinsic::mips_addv_d:
1169    return lowerMSABinaryIntr(Op, DAG, ISD::ADD);
1170  case Intrinsic::mips_addvi_b:
1171  case Intrinsic::mips_addvi_h:
1172  case Intrinsic::mips_addvi_w:
1173  case Intrinsic::mips_addvi_d:
1174    return lowerMSABinaryImmIntr(Op, DAG, ISD::ADD,
1175                                 lowerMSASplatImm(Op, 2, DAG));
1176  case Intrinsic::mips_and_v:
1177    return lowerMSABinaryIntr(Op, DAG, ISD::AND);
1178  case Intrinsic::mips_andi_b:
1179    return lowerMSABinaryImmIntr(Op, DAG, ISD::AND,
1180                                 lowerMSASplatImm(Op, 2, DAG));
1181  case Intrinsic::mips_bnz_b:
1182  case Intrinsic::mips_bnz_h:
1183  case Intrinsic::mips_bnz_w:
1184  case Intrinsic::mips_bnz_d:
1185    return lowerMSABranchIntr(Op, DAG, MipsISD::VALL_NONZERO);
1186  case Intrinsic::mips_bnz_v:
1187    return lowerMSABranchIntr(Op, DAG, MipsISD::VANY_NONZERO);
1188  case Intrinsic::mips_bsel_v:
1189    return DAG.getNode(ISD::VSELECT, SDLoc(Op), Op->getValueType(0),
1190                       Op->getOperand(1), Op->getOperand(2),
1191                       Op->getOperand(3));
1192  case Intrinsic::mips_bseli_b:
1193    return DAG.getNode(ISD::VSELECT, SDLoc(Op), Op->getValueType(0),
1194                       Op->getOperand(1), Op->getOperand(2),
1195                       lowerMSASplatImm(Op, 3, DAG));
1196  case Intrinsic::mips_bz_b:
1197  case Intrinsic::mips_bz_h:
1198  case Intrinsic::mips_bz_w:
1199  case Intrinsic::mips_bz_d:
1200    return lowerMSABranchIntr(Op, DAG, MipsISD::VALL_ZERO);
1201  case Intrinsic::mips_bz_v:
1202    return lowerMSABranchIntr(Op, DAG, MipsISD::VANY_ZERO);
1203  case Intrinsic::mips_ceq_b:
1204  case Intrinsic::mips_ceq_h:
1205  case Intrinsic::mips_ceq_w:
1206  case Intrinsic::mips_ceq_d:
1207    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1208                        Op->getOperand(2), ISD::SETEQ);
1209  case Intrinsic::mips_ceqi_b:
1210  case Intrinsic::mips_ceqi_h:
1211  case Intrinsic::mips_ceqi_w:
1212  case Intrinsic::mips_ceqi_d:
1213    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1214                        lowerMSASplatImm(Op, 2, DAG), ISD::SETEQ);
1215  case Intrinsic::mips_cle_s_b:
1216  case Intrinsic::mips_cle_s_h:
1217  case Intrinsic::mips_cle_s_w:
1218  case Intrinsic::mips_cle_s_d:
1219    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1220                        Op->getOperand(2), ISD::SETLE);
1221  case Intrinsic::mips_clei_s_b:
1222  case Intrinsic::mips_clei_s_h:
1223  case Intrinsic::mips_clei_s_w:
1224  case Intrinsic::mips_clei_s_d:
1225    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1226                        lowerMSASplatImm(Op, 2, DAG), ISD::SETLE);
1227  case Intrinsic::mips_cle_u_b:
1228  case Intrinsic::mips_cle_u_h:
1229  case Intrinsic::mips_cle_u_w:
1230  case Intrinsic::mips_cle_u_d:
1231    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1232                        Op->getOperand(2), ISD::SETULE);
1233  case Intrinsic::mips_clei_u_b:
1234  case Intrinsic::mips_clei_u_h:
1235  case Intrinsic::mips_clei_u_w:
1236  case Intrinsic::mips_clei_u_d:
1237    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1238                        lowerMSASplatImm(Op, 2, DAG), ISD::SETULE);
1239  case Intrinsic::mips_clt_s_b:
1240  case Intrinsic::mips_clt_s_h:
1241  case Intrinsic::mips_clt_s_w:
1242  case Intrinsic::mips_clt_s_d:
1243    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1244                        Op->getOperand(2), ISD::SETLT);
1245  case Intrinsic::mips_clti_s_b:
1246  case Intrinsic::mips_clti_s_h:
1247  case Intrinsic::mips_clti_s_w:
1248  case Intrinsic::mips_clti_s_d:
1249    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1250                        lowerMSASplatImm(Op, 2, DAG), ISD::SETLT);
1251  case Intrinsic::mips_clt_u_b:
1252  case Intrinsic::mips_clt_u_h:
1253  case Intrinsic::mips_clt_u_w:
1254  case Intrinsic::mips_clt_u_d:
1255    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1256                        Op->getOperand(2), ISD::SETULT);
1257  case Intrinsic::mips_clti_u_b:
1258  case Intrinsic::mips_clti_u_h:
1259  case Intrinsic::mips_clti_u_w:
1260  case Intrinsic::mips_clti_u_d:
1261    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1262                        lowerMSASplatImm(Op, 2, DAG), ISD::SETULT);
1263  case Intrinsic::mips_copy_s_b:
1264  case Intrinsic::mips_copy_s_h:
1265  case Intrinsic::mips_copy_s_w:
1266    return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT);
1267  case Intrinsic::mips_copy_u_b:
1268  case Intrinsic::mips_copy_u_h:
1269  case Intrinsic::mips_copy_u_w:
1270    return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT);
1271  case Intrinsic::mips_div_s_b:
1272  case Intrinsic::mips_div_s_h:
1273  case Intrinsic::mips_div_s_w:
1274  case Intrinsic::mips_div_s_d:
1275    return lowerMSABinaryIntr(Op, DAG, ISD::SDIV);
1276  case Intrinsic::mips_div_u_b:
1277  case Intrinsic::mips_div_u_h:
1278  case Intrinsic::mips_div_u_w:
1279  case Intrinsic::mips_div_u_d:
1280    return lowerMSABinaryIntr(Op, DAG, ISD::UDIV);
1281  case Intrinsic::mips_fadd_w:
1282  case Intrinsic::mips_fadd_d:
1283    return lowerMSABinaryIntr(Op, DAG, ISD::FADD);
1284  // Don't lower mips_fcaf_[wd] since LLVM folds SETFALSE condcodes away
1285  case Intrinsic::mips_fceq_w:
1286  case Intrinsic::mips_fceq_d:
1287    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1288                        Op->getOperand(2), ISD::SETOEQ);
1289  case Intrinsic::mips_fcle_w:
1290  case Intrinsic::mips_fcle_d:
1291    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1292                        Op->getOperand(2), ISD::SETOLE);
1293  case Intrinsic::mips_fclt_w:
1294  case Intrinsic::mips_fclt_d:
1295    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1296                        Op->getOperand(2), ISD::SETOLT);
1297  case Intrinsic::mips_fcne_w:
1298  case Intrinsic::mips_fcne_d:
1299    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1300                        Op->getOperand(2), ISD::SETONE);
1301  case Intrinsic::mips_fcor_w:
1302  case Intrinsic::mips_fcor_d:
1303    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1304                        Op->getOperand(2), ISD::SETO);
1305  case Intrinsic::mips_fcueq_w:
1306  case Intrinsic::mips_fcueq_d:
1307    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1308                        Op->getOperand(2), ISD::SETUEQ);
1309  case Intrinsic::mips_fcule_w:
1310  case Intrinsic::mips_fcule_d:
1311    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1312                        Op->getOperand(2), ISD::SETULE);
1313  case Intrinsic::mips_fcult_w:
1314  case Intrinsic::mips_fcult_d:
1315    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1316                        Op->getOperand(2), ISD::SETULT);
1317  case Intrinsic::mips_fcun_w:
1318  case Intrinsic::mips_fcun_d:
1319    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1320                        Op->getOperand(2), ISD::SETUO);
1321  case Intrinsic::mips_fcune_w:
1322  case Intrinsic::mips_fcune_d:
1323    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1324                        Op->getOperand(2), ISD::SETUNE);
1325  case Intrinsic::mips_fdiv_w:
1326  case Intrinsic::mips_fdiv_d:
1327    return lowerMSABinaryIntr(Op, DAG, ISD::FDIV);
1328  case Intrinsic::mips_fill_b:
1329  case Intrinsic::mips_fill_h:
1330  case Intrinsic::mips_fill_w: {
1331    SmallVector<SDValue, 16> Ops;
1332    EVT ResTy = Op->getValueType(0);
1333
1334    for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i)
1335      Ops.push_back(Op->getOperand(1));
1336
1337    return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), ResTy, &Ops[0],
1338                       Ops.size());
1339  }
1340  case Intrinsic::mips_flog2_w:
1341  case Intrinsic::mips_flog2_d:
1342    return lowerMSAUnaryIntr(Op, DAG, ISD::FLOG2);
1343  case Intrinsic::mips_fmul_w:
1344  case Intrinsic::mips_fmul_d:
1345    return lowerMSABinaryIntr(Op, DAG, ISD::FMUL);
1346  case Intrinsic::mips_frint_w:
1347  case Intrinsic::mips_frint_d:
1348    return lowerMSAUnaryIntr(Op, DAG, ISD::FRINT);
1349  case Intrinsic::mips_fsqrt_w:
1350  case Intrinsic::mips_fsqrt_d:
1351    return lowerMSAUnaryIntr(Op, DAG, ISD::FSQRT);
1352  case Intrinsic::mips_fsub_w:
1353  case Intrinsic::mips_fsub_d:
1354    return lowerMSABinaryIntr(Op, DAG, ISD::FSUB);
1355  case Intrinsic::mips_ilvev_b:
1356  case Intrinsic::mips_ilvev_h:
1357  case Intrinsic::mips_ilvev_w:
1358  case Intrinsic::mips_ilvev_d:
1359    return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), Op->getValueType(0),
1360                       Op->getOperand(1), Op->getOperand(2));
1361  case Intrinsic::mips_ilvl_b:
1362  case Intrinsic::mips_ilvl_h:
1363  case Intrinsic::mips_ilvl_w:
1364  case Intrinsic::mips_ilvl_d:
1365    return DAG.getNode(MipsISD::ILVL, SDLoc(Op), Op->getValueType(0),
1366                       Op->getOperand(1), Op->getOperand(2));
1367  case Intrinsic::mips_ilvod_b:
1368  case Intrinsic::mips_ilvod_h:
1369  case Intrinsic::mips_ilvod_w:
1370  case Intrinsic::mips_ilvod_d:
1371    return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), Op->getValueType(0),
1372                       Op->getOperand(1), Op->getOperand(2));
1373  case Intrinsic::mips_ilvr_b:
1374  case Intrinsic::mips_ilvr_h:
1375  case Intrinsic::mips_ilvr_w:
1376  case Intrinsic::mips_ilvr_d:
1377    return DAG.getNode(MipsISD::ILVR, SDLoc(Op), Op->getValueType(0),
1378                       Op->getOperand(1), Op->getOperand(2));
1379  case Intrinsic::mips_insert_b:
1380  case Intrinsic::mips_insert_h:
1381  case Intrinsic::mips_insert_w:
1382    return lowerMSAInsertIntr(Op, DAG, ISD::INSERT_VECTOR_ELT);
1383  case Intrinsic::mips_ldi_b:
1384  case Intrinsic::mips_ldi_h:
1385  case Intrinsic::mips_ldi_w:
1386  case Intrinsic::mips_ldi_d:
1387    return lowerMSASplatImm(Op, 1, DAG);
1388  case Intrinsic::mips_max_s_b:
1389  case Intrinsic::mips_max_s_h:
1390  case Intrinsic::mips_max_s_w:
1391  case Intrinsic::mips_max_s_d:
1392    return lowerMSABinaryIntr(Op, DAG, MipsISD::VSMAX);
1393  case Intrinsic::mips_max_u_b:
1394  case Intrinsic::mips_max_u_h:
1395  case Intrinsic::mips_max_u_w:
1396  case Intrinsic::mips_max_u_d:
1397    return lowerMSABinaryIntr(Op, DAG, MipsISD::VUMAX);
1398  case Intrinsic::mips_maxi_s_b:
1399  case Intrinsic::mips_maxi_s_h:
1400  case Intrinsic::mips_maxi_s_w:
1401  case Intrinsic::mips_maxi_s_d:
1402    return lowerMSABinaryImmIntr(Op, DAG, MipsISD::VSMAX,
1403                                 lowerMSASplatImm(Op, 2, DAG));
1404  case Intrinsic::mips_maxi_u_b:
1405  case Intrinsic::mips_maxi_u_h:
1406  case Intrinsic::mips_maxi_u_w:
1407  case Intrinsic::mips_maxi_u_d:
1408    return lowerMSABinaryImmIntr(Op, DAG, MipsISD::VUMAX,
1409                                 lowerMSASplatImm(Op, 2, DAG));
1410  case Intrinsic::mips_min_s_b:
1411  case Intrinsic::mips_min_s_h:
1412  case Intrinsic::mips_min_s_w:
1413  case Intrinsic::mips_min_s_d:
1414    return lowerMSABinaryIntr(Op, DAG, MipsISD::VSMIN);
1415  case Intrinsic::mips_min_u_b:
1416  case Intrinsic::mips_min_u_h:
1417  case Intrinsic::mips_min_u_w:
1418  case Intrinsic::mips_min_u_d:
1419    return lowerMSABinaryIntr(Op, DAG, MipsISD::VUMIN);
1420  case Intrinsic::mips_mini_s_b:
1421  case Intrinsic::mips_mini_s_h:
1422  case Intrinsic::mips_mini_s_w:
1423  case Intrinsic::mips_mini_s_d:
1424    return lowerMSABinaryImmIntr(Op, DAG, MipsISD::VSMIN,
1425                                 lowerMSASplatImm(Op, 2, DAG));
1426  case Intrinsic::mips_mini_u_b:
1427  case Intrinsic::mips_mini_u_h:
1428  case Intrinsic::mips_mini_u_w:
1429  case Intrinsic::mips_mini_u_d:
1430    return lowerMSABinaryImmIntr(Op, DAG, MipsISD::VUMIN,
1431                                 lowerMSASplatImm(Op, 2, DAG));
1432  case Intrinsic::mips_mulv_b:
1433  case Intrinsic::mips_mulv_h:
1434  case Intrinsic::mips_mulv_w:
1435  case Intrinsic::mips_mulv_d:
1436    return lowerMSABinaryIntr(Op, DAG, ISD::MUL);
1437  case Intrinsic::mips_nlzc_b:
1438  case Intrinsic::mips_nlzc_h:
1439  case Intrinsic::mips_nlzc_w:
1440  case Intrinsic::mips_nlzc_d:
1441    return lowerMSAUnaryIntr(Op, DAG, ISD::CTLZ);
1442  case Intrinsic::mips_nor_v: {
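    // There is no generic NOR node, so lower to an OR and invert the result.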
1443    SDValue Res = lowerMSABinaryIntr(Op, DAG, ISD::OR);
1444    return DAG.getNOT(SDLoc(Op), Res, Res->getValueType(0));
1445  }
1446  case Intrinsic::mips_nori_b: {
1447    SDValue Res = lowerMSABinaryImmIntr(Op, DAG, ISD::OR,
1448                                        lowerMSASplatImm(Op, 2, DAG));
1449    return DAG.getNOT(SDLoc(Op), Res, Res->getValueType(0));
1450  }
1451  case Intrinsic::mips_or_v:
1452    return lowerMSABinaryIntr(Op, DAG, ISD::OR);
1453  case Intrinsic::mips_ori_b:
1454    return lowerMSABinaryImmIntr(Op, DAG, ISD::OR,
1455                                 lowerMSASplatImm(Op, 2, DAG));
1456  case Intrinsic::mips_pckev_b:
1457  case Intrinsic::mips_pckev_h:
1458  case Intrinsic::mips_pckev_w:
1459  case Intrinsic::mips_pckev_d:
1460    return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), Op->getValueType(0),
1461                       Op->getOperand(1), Op->getOperand(2));
1462  case Intrinsic::mips_pckod_b:
1463  case Intrinsic::mips_pckod_h:
1464  case Intrinsic::mips_pckod_w:
1465  case Intrinsic::mips_pckod_d:
1466    return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), Op->getValueType(0),
1467                       Op->getOperand(1), Op->getOperand(2));
1468  case Intrinsic::mips_pcnt_b:
1469  case Intrinsic::mips_pcnt_h:
1470  case Intrinsic::mips_pcnt_w:
1471  case Intrinsic::mips_pcnt_d:
1472    return lowerMSAUnaryIntr(Op, DAG, ISD::CTPOP);
1473  case Intrinsic::mips_shf_b:
1474  case Intrinsic::mips_shf_h:
1475  case Intrinsic::mips_shf_w:
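    // Note that MipsISD::SHF takes the immediate as its first operand, so the
    // intrinsic operands are passed in swapped order.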
1476    return DAG.getNode(MipsISD::SHF, SDLoc(Op), Op->getValueType(0),
1477                       Op->getOperand(2), Op->getOperand(1));
1478  case Intrinsic::mips_sll_b:
1479  case Intrinsic::mips_sll_h:
1480  case Intrinsic::mips_sll_w:
1481  case Intrinsic::mips_sll_d:
1482    return lowerMSABinaryIntr(Op, DAG, ISD::SHL);
1483  case Intrinsic::mips_slli_b:
1484  case Intrinsic::mips_slli_h:
1485  case Intrinsic::mips_slli_w:
1486  case Intrinsic::mips_slli_d:
1487    return lowerMSABinaryImmIntr(Op, DAG, ISD::SHL,
1488                                 lowerMSASplatImm(Op, 2, DAG));
1489  case Intrinsic::mips_sra_b:
1490  case Intrinsic::mips_sra_h:
1491  case Intrinsic::mips_sra_w:
1492  case Intrinsic::mips_sra_d:
1493    return lowerMSABinaryIntr(Op, DAG, ISD::SRA);
1494  case Intrinsic::mips_srai_b:
1495  case Intrinsic::mips_srai_h:
1496  case Intrinsic::mips_srai_w:
1497  case Intrinsic::mips_srai_d:
1498    return lowerMSABinaryImmIntr(Op, DAG, ISD::SRA,
1499                                 lowerMSASplatImm(Op, 2, DAG));
1500  case Intrinsic::mips_srl_b:
1501  case Intrinsic::mips_srl_h:
1502  case Intrinsic::mips_srl_w:
1503  case Intrinsic::mips_srl_d:
1504    return lowerMSABinaryIntr(Op, DAG, ISD::SRL);
1505  case Intrinsic::mips_srli_b:
1506  case Intrinsic::mips_srli_h:
1507  case Intrinsic::mips_srli_w:
1508  case Intrinsic::mips_srli_d:
1509    return lowerMSABinaryImmIntr(Op, DAG, ISD::SRL,
1510                                 lowerMSASplatImm(Op, 2, DAG));
1511  case Intrinsic::mips_subv_b:
1512  case Intrinsic::mips_subv_h:
1513  case Intrinsic::mips_subv_w:
1514  case Intrinsic::mips_subv_d:
1515    return lowerMSABinaryIntr(Op, DAG, ISD::SUB);
1516  case Intrinsic::mips_subvi_b:
1517  case Intrinsic::mips_subvi_h:
1518  case Intrinsic::mips_subvi_w:
1519  case Intrinsic::mips_subvi_d:
1520    return lowerMSABinaryImmIntr(Op, DAG, ISD::SUB,
1521                                 lowerMSASplatImm(Op, 2, DAG));
1522  case Intrinsic::mips_vshf_b:
1523  case Intrinsic::mips_vshf_h:
1524  case Intrinsic::mips_vshf_w:
1525  case Intrinsic::mips_vshf_d:
1526    return DAG.getNode(MipsISD::VSHF, SDLoc(Op), Op->getValueType(0),
1527                       Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
1528  case Intrinsic::mips_xor_v:
1529    return lowerMSABinaryIntr(Op, DAG, ISD::XOR);
1530  case Intrinsic::mips_xori_b:
1531    return lowerMSABinaryImmIntr(Op, DAG, ISD::XOR,
1532                                 lowerMSASplatImm(Op, 2, DAG));
1533  }
1534}
1535
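// Lower the llvm.mips.ld.* and llvm.mips.ldx.* intrinsics to an ordinary
// 16-byte aligned vector load from (Address + Offset).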
1536static SDValue lowerMSALoadIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
1537  SDLoc DL(Op);
1538  SDValue ChainIn = Op->getOperand(0);
1539  SDValue Address = Op->getOperand(2);
1540  SDValue Offset  = Op->getOperand(3);
1541  EVT ResTy = Op->getValueType(0);
1542  EVT PtrTy = Address->getValueType(0);
1543
1544  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
1545
1546  return DAG.getLoad(ResTy, DL, ChainIn, Address, MachinePointerInfo(), false,
1547                     false, false, 16);
1548}
1549
1550SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
1551                                                     SelectionDAG &DAG) const {
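  // For ISD::INTRINSIC_W_CHAIN nodes, operand 0 is the chain and operand 1 is
  // the intrinsic ID; the intrinsic's own arguments start at operand 2.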
1552  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
1553  switch (Intr) {
1554  default:
1555    return SDValue();
1556  case Intrinsic::mips_extp:
1557    return lowerDSPIntr(Op, DAG, MipsISD::EXTP);
1558  case Intrinsic::mips_extpdp:
1559    return lowerDSPIntr(Op, DAG, MipsISD::EXTPDP);
1560  case Intrinsic::mips_extr_w:
1561    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_W);
1562  case Intrinsic::mips_extr_r_w:
1563    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_R_W);
1564  case Intrinsic::mips_extr_rs_w:
1565    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_RS_W);
1566  case Intrinsic::mips_extr_s_h:
1567    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_S_H);
1568  case Intrinsic::mips_mthlip:
1569    return lowerDSPIntr(Op, DAG, MipsISD::MTHLIP);
1570  case Intrinsic::mips_mulsaq_s_w_ph:
1571    return lowerDSPIntr(Op, DAG, MipsISD::MULSAQ_S_W_PH);
1572  case Intrinsic::mips_maq_s_w_phl:
1573    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHL);
1574  case Intrinsic::mips_maq_s_w_phr:
1575    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHR);
1576  case Intrinsic::mips_maq_sa_w_phl:
1577    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHL);
1578  case Intrinsic::mips_maq_sa_w_phr:
1579    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHR);
1580  case Intrinsic::mips_dpaq_s_w_ph:
1581    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_S_W_PH);
1582  case Intrinsic::mips_dpsq_s_w_ph:
1583    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_S_W_PH);
1584  case Intrinsic::mips_dpaq_sa_l_w:
1585    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_SA_L_W);
1586  case Intrinsic::mips_dpsq_sa_l_w:
1587    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_SA_L_W);
1588  case Intrinsic::mips_dpaqx_s_w_ph:
1589    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_S_W_PH);
1590  case Intrinsic::mips_dpaqx_sa_w_ph:
1591    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_SA_W_PH);
1592  case Intrinsic::mips_dpsqx_s_w_ph:
1593    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH);
1594  case Intrinsic::mips_dpsqx_sa_w_ph:
1595    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH);
1596  case Intrinsic::mips_ld_b:
1597  case Intrinsic::mips_ld_h:
1598  case Intrinsic::mips_ld_w:
1599  case Intrinsic::mips_ld_d:
1600  case Intrinsic::mips_ldx_b:
1601  case Intrinsic::mips_ldx_h:
1602  case Intrinsic::mips_ldx_w:
1603  case Intrinsic::mips_ldx_d:
1604    return lowerMSALoadIntr(Op, DAG, Intr);
1605  }
1606}
1607
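// Lower the llvm.mips.st.* and llvm.mips.stx.* intrinsics to an ordinary
// 16-byte aligned vector store to (Address + Offset).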
1608static SDValue lowerMSAStoreIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
1609  SDLoc DL(Op);
1610  SDValue ChainIn = Op->getOperand(0);
1611  SDValue Value   = Op->getOperand(2);
1612  SDValue Address = Op->getOperand(3);
1613  SDValue Offset  = Op->getOperand(4);
1614  EVT PtrTy = Address->getValueType(0);
1615
1616  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
1617
1618  return DAG.getStore(ChainIn, DL, Value, Address, MachinePointerInfo(), false,
1619                      false, 16);
1620}
1621
1622SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op,
1623                                                  SelectionDAG &DAG) const {
1624  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
1625  switch (Intr) {
1626  default:
1627    return SDValue();
1628  case Intrinsic::mips_st_b:
1629  case Intrinsic::mips_st_h:
1630  case Intrinsic::mips_st_w:
1631  case Intrinsic::mips_st_d:
1632  case Intrinsic::mips_stx_b:
1633  case Intrinsic::mips_stx_h:
1634  case Intrinsic::mips_stx_w:
1635  case Intrinsic::mips_stx_d:
1636    return lowerMSAStoreIntr(Op, DAG, Intr);
1637  }
1638}
1639
1640/// \brief Check if the given BuildVectorSDNode is a splat.
1641/// This method currently relies on DAG nodes being reused when equivalent,
1642/// so it's possible for this to return false even when isConstantSplat returns
1643/// true.
1644static bool isSplatVector(const BuildVectorSDNode *N) {
1645  unsigned int nOps = N->getNumOperands();
1646  assert(nOps > 1 && "isSplatVector given a 0 or 1 element build vector");
1647
1648  SDValue Operand0 = N->getOperand(0);
1649
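  // SDValue equality compares the underlying node and result number, so this
  // only recognises splats whose operands have been CSE'd to the same node.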
1650  for (unsigned int i = 1; i < nOps; ++i) {
1651    if (N->getOperand(i) != Operand0)
1652      return false;
1653  }
1654
1655  return true;
1656}
1657
1658// Lower ISD::EXTRACT_VECTOR_ELT into MipsISD::VEXTRACT_SEXT_ELT.
1659//
1660// The non-value bits resulting from ISD::EXTRACT_VECTOR_ELT are undefined. We
1661// choose to sign-extend but we could have equally chosen zero-extend. The
1662// DAGCombiner will fold any sign/zero extension of the ISD::EXTRACT_VECTOR_ELT
1663// result into this node later (possibly changing it to a zero-extend in the
1664// process).
1665SDValue MipsSETargetLowering::
1666lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
1667  SDLoc DL(Op);
1668  EVT ResTy = Op->getValueType(0);
1669  SDValue Op0 = Op->getOperand(0);
1670  SDValue Op1 = Op->getOperand(1);
1671  EVT EltTy = Op0->getValueType(0).getVectorElementType();
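  // The trailing ValueType operand records the element type being extracted;
  // the result is sign-extended from that width.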
1672  return DAG.getNode(MipsISD::VEXTRACT_SEXT_ELT, DL, ResTy, Op0, Op1,
1673                     DAG.getValueType(EltTy));
1674}
1675
1676static bool isConstantOrUndef(const SDValue Op) {
1677  if (Op->getOpcode() == ISD::UNDEF)
1678    return true;
1679  if (isa<ConstantSDNode>(Op))
1680    return true;
1681  if (isa<ConstantFPSDNode>(Op))
1682    return true;
1683  return false;
1684}
1685
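// Returns true if at least one operand of the BUILD_VECTOR is a constant or
// undef. lowerBUILD_VECTOR uses this to limit the INSERT_VECTOR_ELT expansion
// below to vectors with no constant or undef operands.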
1686static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op) {
1687  for (unsigned i = 0; i < Op->getNumOperands(); ++i)
1688    if (isConstantOrUndef(Op->getOperand(i)))
1689      return true;
1690  return false;
1691}
1692
1693// Lowers ISD::BUILD_VECTOR into appropriate SelectionDAG nodes for the
1694// backend.
1695//
1696// Lowers according to the following rules:
1697// - Constant splats are legal as-is as long as the SplatBitSize is a power of
1698//   2 less than or equal to 64 and the value fits into a signed 10-bit
1699//   immediate
1700// - Constant splats are lowered to bitconverted BUILD_VECTORs if SplatBitSize
1701//   is a power of 2 less than or equal to 64 and the value does not fit into a
1702//   signed 10-bit immediate
1703// - Non-constant splats are legal as-is.
1704// - Non-constant non-splats are lowered to sequences of INSERT_VECTOR_ELT.
1705// - All others are illegal and must be expanded.
1706SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op,
1707                                                SelectionDAG &DAG) const {
1708  BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op);
1709  EVT ResTy = Op->getValueType(0);
1710  SDLoc DL(Op);
1711  APInt SplatValue, SplatUndef;
1712  unsigned SplatBitSize;
1713  bool HasAnyUndefs;
1714
1715  if (!Subtarget->hasMSA() || !ResTy.is128BitVector())
1716    return SDValue();
1717
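  // isConstantSplat needs the vector's endianness to reassemble the splat
  // value from the raw element bits; the MinSplatBits argument of 8 keeps it
  // from reporting splat elements narrower than a byte.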
1718  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
1719                            HasAnyUndefs, 8,
1720                            !Subtarget->isLittle()) && SplatBitSize <= 64) {
1721    // We can only cope with 8, 16, 32, or 64-bit elements
1722    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
1723        SplatBitSize != 64)
1724      return SDValue();
1725
1726    // If the value fits into a simm10 then we can use ldi.[bhwd]
1727    if (SplatValue.isSignedIntN(10))
1728      return Op;
1729
1730    EVT ViaVecTy;
1731
1732    switch (SplatBitSize) {
1733    default:
1734      return SDValue();
1735    case 8:
1736      ViaVecTy = MVT::v16i8;
1737      break;
1738    case 16:
1739      ViaVecTy = MVT::v8i16;
1740      break;
1741    case 32:
1742      ViaVecTy = MVT::v4i32;
1743      break;
1744    case 64:
1745      // There's no fill.d to fall back on for 64-bit values
1746      return SDValue();
1747    }
1748
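    // Replicate the splat value as an i32 constant; BUILD_VECTOR implicitly
    // truncates each operand to the element width, and the bitcast below
    // recovers the requested type when an intermediate type was used.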
1749    SmallVector<SDValue, 16> Ops;
1750    SDValue Constant = DAG.getConstant(SplatValue.sextOrSelf(32), MVT::i32);
1751
1752    for (unsigned i = 0; i < ViaVecTy.getVectorNumElements(); ++i)
1753      Ops.push_back(Constant);
1754
1755    SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Node), ViaVecTy,
1756                                 &Ops[0], Ops.size());
1757
1758    if (ViaVecTy != ResTy)
1759      Result = DAG.getNode(ISD::BITCAST, SDLoc(Node), ResTy, Result);
1760
1761    return Result;
1762  } else if (isSplatVector(Node))
1763    return Op;
1764  else if (!isConstantOrUndefBUILD_VECTOR(Node)) {
1765    // Use a chain of INSERT_VECTOR_ELT operations rather than expanding to
1766    // stores. The resulting code is the same length as the expansion, but it
1767    // doesn't use memory operations.
1768    EVT ResTy = Node->getValueType(0);
1769
1770    assert(ResTy.isVector());
1771
1772    unsigned NumElts = ResTy.getVectorNumElements();
1773    SDValue Vector = DAG.getUNDEF(ResTy);
1774    for (unsigned i = 0; i < NumElts; ++i) {
1775      Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
1776                           Node->getOperand(i),
1777                           DAG.getConstant(i, MVT::i32));
1778    }
1779    return Vector;
1780  }
1781
1782  return SDValue();
1783}
1784
1785// Lower VECTOR_SHUFFLE into SHF (if possible).
1786//
1787// SHF splits the vector into blocks of four elements, then shuffles these
1788// elements according to a <4 x i2> constant (encoded as an integer immediate).
1789//
1790// It is therefore possible to lower into SHF when the mask takes the form:
1791//   <a, b, c, d, a+4, b+4, c+4, d+4, a+8, b+8, c+8, d+8, ...>
1792// When undefs appear they are treated as if they were whatever value is
1793// necessary in order to fit the above form.
1794//
1795// For example:
1796//   %2 = shufflevector <8 x i16> %0, <8 x i16> undef,
1797//                      <8 x i32> <i32 3, i32 2, i32 1, i32 0,
1798//                                 i32 7, i32 6, i32 5, i32 4>
1799// is lowered to:
1800//   (SHF_H $w0, $w1, 27)
1801// where the 27 comes from:
1802//   3 + (2 << 2) + (1 << 4) + (0 << 6)
1803static SDValue lowerVECTOR_SHUFFLE_SHF(SDValue Op, EVT ResTy,
1804                                       SmallVector<int, 16> Indices,
1805                                       SelectionDAG &DAG) {
1806  int SHFIndices[4] = { -1, -1, -1, -1 };
1807
1808  if (Indices.size() < 4)
1809    return SDValue();
1810
1811  for (unsigned i = 0; i < 4; ++i) {
1812    for (unsigned j = i; j < Indices.size(); j += 4) {
1813      int Idx = Indices[j];
1814
1815      // Convert from vector index to 4-element subvector index
1816      // If an index refers to an element outside of the subvector then give up
1817      if (Idx != -1) {
1818        Idx -= 4 * (j / 4);
1819        if (Idx < 0 || Idx >= 4)
1820          return SDValue();
1821      }
1822
1823      // If this position is still undef, take the current index. Note that
1824      // it may remain undef if the current index is also undef.
1825      if (SHFIndices[i] == -1)
1826        SHFIndices[i] = Idx;
1827
1828      // A non-undef index must match the one already recorded for this
1829      // position; if it doesn't, give up.
1830      if (!(Idx == -1 || Idx == SHFIndices[i]))
1831        return SDValue();
1832    }
1833  }
1834
1835  // Calculate the immediate. Replace any remaining undefs with zero
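  // Each 2-bit field of the immediate, starting at bit 0, holds the source
  // index chosen for the corresponding result position within a group of four.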
1836  APInt Imm(32, 0);
1837  for (int i = 3; i >= 0; --i) {
1838    int Idx = SHFIndices[i];
1839
1840    if (Idx == -1)
1841      Idx = 0;
1842
1843    Imm <<= 2;
1844    Imm |= Idx & 0x3;
1845  }
1846
1847  return DAG.getNode(MipsISD::SHF, SDLoc(Op), ResTy,
1848                     DAG.getConstant(Imm, MVT::i32), Op->getOperand(0));
1849}
1850
1851// Lower VECTOR_SHUFFLE into ILVEV (if possible).
1852//
1853// ILVEV interleaves the even elements from each vector.
1854//
1855// It is possible to lower into ILVEV when the mask takes the form:
1856//   <0, n, 2, n+2, 4, n+4, ...>
1857// where n is the number of elements in the vector.
1858//
1859// When undefs appear in the mask they are treated as if they were whatever
1860// value is necessary in order to fit the above form.
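//
// For example, a v4i32 shuffle with the mask <0, 4, 2, 6> interleaves the
// even elements of the two operands and can be lowered to ILVEV.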
1861static SDValue lowerVECTOR_SHUFFLE_ILVEV(SDValue Op, EVT ResTy,
1862                                         SmallVector<int, 16> Indices,
1863                                         SelectionDAG &DAG) {
1864  assert((Indices.size() % 2) == 0);
1865  int WsIdx = 0;
1866  int WtIdx = ResTy.getVectorNumElements();
1867
1868  for (unsigned i = 0; i < Indices.size(); i += 2) {
1869    if (Indices[i] != -1 && Indices[i] != WsIdx)
1870      return SDValue();
1871    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1872      return SDValue();
1873    WsIdx += 2;
1874    WtIdx += 2;
1875  }
1876
1877  return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), ResTy, Op->getOperand(0),
1878                     Op->getOperand(1));
1879}
1880
1881// Lower VECTOR_SHUFFLE into ILVOD (if possible).
1882//
1883// ILVOD interleaves the odd elements from each vector.
1884//
1885// It is possible to lower into ILVOD when the mask takes the form:
1886//   <1, n+1, 3, n+3, 5, n+5, ...>
1887// where n is the number of elements in the vector.
1888//
1889// When undefs appear in the mask they are treated as if they were whatever
1890// value is necessary in order to fit the above form.
1891static SDValue lowerVECTOR_SHUFFLE_ILVOD(SDValue Op, EVT ResTy,
1892                                         SmallVector<int, 16> Indices,
1893                                         SelectionDAG &DAG) {
1894  assert((Indices.size() % 2) == 0);
1895  int WsIdx = 1;
1896  int WtIdx = ResTy.getVectorNumElements() + 1;
1897
1898  for (unsigned i = 0; i < Indices.size(); i += 2) {
1899    if (Indices[i] != -1 && Indices[i] != WsIdx)
1900      return SDValue();
1901    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1902      return SDValue();
1903    WsIdx += 2;
1904    WtIdx += 2;
1905  }
1906
1907  return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), ResTy, Op->getOperand(0),
1908                     Op->getOperand(1));
1909}
1910
1911// Lower VECTOR_SHUFFLE into ILVL (if possible).
1912//
1913// ILVL interleaves consecutive elements from the left half of each vector.
1914//
1915// It is possible to lower into ILVL when the mask takes the form:
1916//   <0, n, 1, n+1, 2, n+2, ...>
1917// where n is the number of elements in the vector.
1918//
1919// When undefs appear in the mask they are treated as if they were whatever
1920// value is necessary in order to fit the above form.
1921static SDValue lowerVECTOR_SHUFFLE_ILVL(SDValue Op, EVT ResTy,
1922                                        SmallVector<int, 16> Indices,
1923                                        SelectionDAG &DAG) {
1924  assert((Indices.size() % 2) == 0);
1925  int WsIdx = 0;
1926  int WtIdx = ResTy.getVectorNumElements();
1927
1928  for (unsigned i = 0; i < Indices.size(); i += 2) {
1929    if (Indices[i] != -1 && Indices[i] != WsIdx)
1930      return SDValue();
1931    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1932      return SDValue();
1933    WsIdx ++;
1934    WtIdx ++;
1935  }
1936
1937  return DAG.getNode(MipsISD::ILVL, SDLoc(Op), ResTy, Op->getOperand(0),
1938                     Op->getOperand(1));
1939}
1940
1941// Lower VECTOR_SHUFFLE into ILVR (if possible).
1942//
1943// ILVR interleaves consecutive elements from the right half of each vector.
1944//
1945// It is possible to lower into ILVR when the mask takes the form:
1946//   <x, n+x, x+1, n+x+1, x+2, n+x+2, ...>
1947// where n is the number of elements in the vector and x is half n.
1948//
1949// When undefs appear in the mask they are treated as if they were whatever
1950// value is necessary in order to fit the above form.
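//
// For example, for v8i16 (n = 8, x = 4) the mask is:
//   <4, 12, 5, 13, 6, 14, 7, 15>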
1951static SDValue lowerVECTOR_SHUFFLE_ILVR(SDValue Op, EVT ResTy,
1952                                        SmallVector<int, 16> Indices,
1953                                        SelectionDAG &DAG) {
1954  assert((Indices.size() % 2) == 0);
1955  unsigned NumElts = ResTy.getVectorNumElements();
1956  int WsIdx = NumElts / 2;
1957  int WtIdx = NumElts + NumElts / 2;
1958
1959  for (unsigned i = 0; i < Indices.size(); i += 2) {
1960    if (Indices[i] != -1 && Indices[i] != WsIdx)
1961      return SDValue();
1962    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1963      return SDValue();
1964    WsIdx ++;
1965    WtIdx ++;
1966  }
1967
1968  return DAG.getNode(MipsISD::ILVR, SDLoc(Op), ResTy, Op->getOperand(0),
1969                     Op->getOperand(1));
1970}
1971
1972// Lower VECTOR_SHUFFLE into PCKEV (if possible).
1973//
1974// PCKEV copies the even elements of each vector into the result vector.
1975//
1976// It is possible to lower into PCKEV when the mask takes the form:
1977//   <0, 2, 4, ..., n, n+2, n+4, ...>
1978// where n is the number of elements in the vector.
1979//
1980// When undefs appear in the mask they are treated as if they were whatever
1981// value is necessary in order to fit the above form.
1982static SDValue lowerVECTOR_SHUFFLE_PCKEV(SDValue Op, EVT ResTy,
1983                                         SmallVector<int, 16> Indices,
1984                                         SelectionDAG &DAG) {
1985  assert((Indices.size() % 2) == 0);
1986  int Idx = 0;
1987
1988  for (unsigned i = 0; i < Indices.size(); ++i) {
1989    if (Indices[i] != -1 && Indices[i] != Idx)
1990      return SDValue();
1991    Idx += 2;
1992  }
1993
1994  return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), ResTy, Op->getOperand(0),
1995                     Op->getOperand(1));
1996}
1997
1998// Lower VECTOR_SHUFFLE into PCKOD (if possible).
1999//
2000// PCKOD copies the odd elements of each vector into the result vector.
2001//
2002// It is possible to lower into PCKOD when the mask takes the form:
2003//   <1, 3, 5, ..., n+1, n+3, n+5, ...>
2004// where n is the number of elements in the vector.
2005//
2006// When undefs appear in the mask they are treated as if they were whatever
2007// value is necessary in order to fit the above form.
2008static SDValue lowerVECTOR_SHUFFLE_PCKOD(SDValue Op, EVT ResTy,
2009                                         SmallVector<int, 16> Indices,
2010                                         SelectionDAG &DAG) {
2011  assert((Indices.size() % 2) == 0);
2012  int Idx = 1;
2013
2014  for (unsigned i = 0; i < Indices.size(); ++i) {
2015    if (Indices[i] != -1 && Indices[i] != Idx)
2016      return SDValue();
2017    Idx += 2;
2018  }
2019
2020  return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), ResTy, Op->getOperand(0),
2021                     Op->getOperand(1));
2022}
2023
2024// Lower VECTOR_SHUFFLE into VSHF.
2025//
2026// This mostly consists of converting the shuffle indices in Indices into a
2027// BUILD_VECTOR and adding it as an operand to the resulting VSHF. There is
2028// also code to eliminate unused operands of the VECTOR_SHUFFLE. For example,
2029// if the type is v8i16 and all the indices are less than 8 then the second
2030// operand is unused and can be replaced with anything. We choose to replace it
2031// with the used operand since this reduces the number of instructions overall.
2032static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
2033                                        SmallVector<int, 16> Indices,
2034                                        SelectionDAG &DAG) {
2035  SmallVector<SDValue, 16> Ops;
2036  SDValue Op0;
2037  SDValue Op1;
2038  EVT MaskVecTy = ResTy.changeVectorElementTypeToInteger();
2039  EVT MaskEltTy = MaskVecTy.getVectorElementType();
2040  bool Using1stVec = false;
2041  bool Using2ndVec = false;
2042  SDLoc DL(Op);
2043  int ResTyNumElts = ResTy.getVectorNumElements();
2044
2045  for (int i = 0; i < ResTyNumElts; ++i) {
2046    // Idx == -1 means UNDEF
2047    int Idx = Indices[i];
2048
2049    if (0 <= Idx && Idx < ResTyNumElts)
2050      Using1stVec = true;
2051    if (ResTyNumElts <= Idx && Idx < ResTyNumElts * 2)
2052      Using2ndVec = true;
2053  }
2054
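  // Build the shuffle mask as a BUILD_VECTOR of target constants so the mask
  // elements are left untouched until instruction selection.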
2055  for (SmallVector<int, 16>::iterator I = Indices.begin(); I != Indices.end();
2056       ++I)
2057    Ops.push_back(DAG.getTargetConstant(*I, MaskEltTy));
2058
2059  SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, &Ops[0],
2060                                Ops.size());
2061
2062  if (Using1stVec && Using2ndVec) {
2063    Op0 = Op->getOperand(0);
2064    Op1 = Op->getOperand(1);
2065  } else if (Using1stVec)
2066    Op0 = Op1 = Op->getOperand(0);
2067  else if (Using2ndVec)
2068    Op0 = Op1 = Op->getOperand(1);
2069  else
2070    llvm_unreachable("shuffle vector mask references neither vector operand?");
2071
2072  return DAG.getNode(MipsISD::VSHF, DL, ResTy, MaskVec, Op0, Op1);
2073}
2074
2075// Lower VECTOR_SHUFFLE into one of a number of instructions depending on the
2076// indices in the shuffle.
2077SDValue MipsSETargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
2078                                                  SelectionDAG &DAG) const {
2079  ShuffleVectorSDNode *Node = cast<ShuffleVectorSDNode>(Op);
2080  EVT ResTy = Op->getValueType(0);
2081
2082  if (!ResTy.is128BitVector())
2083    return SDValue();
2084
2085  int ResTyNumElts = ResTy.getVectorNumElements();
2086  SmallVector<int, 16> Indices;
2087
2088  for (int i = 0; i < ResTyNumElts; ++i)
2089    Indices.push_back(Node->getMaskElt(i));
2090
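  // Try the single-instruction shuffle lowerings first; VSHF handles
  // arbitrary masks, so it is used as the fallback.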
2091  SDValue Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG);
2092  if (Result.getNode())
2093    return Result;
2094  Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG);
2095  if (Result.getNode())
2096    return Result;
2097  Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG);
2098  if (Result.getNode())
2099    return Result;
2100  Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG);
2101  if (Result.getNode())
2102    return Result;
2103  Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG);
2104  if (Result.getNode())
2105    return Result;
2106  Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG);
2107  if (Result.getNode())
2108    return Result;
2109  Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG);
2110  if (Result.getNode())
2111    return Result;
2112  return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
2113}
2114
2115MachineBasicBlock *MipsSETargetLowering::
2116emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const {
2117  // $bb:
2118  //  bposge32_pseudo $vr0
2119  //  =>
2120  // $bb:
2121  //  bposge32 $tbb
2122  // $fbb:
2123  //  li $vr2, 0
2124  //  b $sink
2125  // $tbb:
2126  //  li $vr1, 1
2127  // $sink:
2128  //  $vr0 = phi($vr2, $fbb, $vr1, $tbb)
2129
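  // bposge32 is only available as a branch, so the boolean result of the
  // pseudo has to be materialised with a branch-and-phi diamond.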
2130  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2131  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2132  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
2133  DebugLoc DL = MI->getDebugLoc();
2134  const BasicBlock *LLVM_BB = BB->getBasicBlock();
2135  MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
2136  MachineFunction *F = BB->getParent();
2137  MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
2138  MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
2139  MachineBasicBlock *Sink  = F->CreateMachineBasicBlock(LLVM_BB);
2140  F->insert(It, FBB);
2141  F->insert(It, TBB);
2142  F->insert(It, Sink);
2143
2144  // Transfer the remainder of BB and its successor edges to Sink.
2145  Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
2146               BB->end());
2147  Sink->transferSuccessorsAndUpdatePHIs(BB);
2148
2149  // Add successors.
2150  BB->addSuccessor(FBB);
2151  BB->addSuccessor(TBB);
2152  FBB->addSuccessor(Sink);
2153  TBB->addSuccessor(Sink);
2154
2155  // Insert the real bposge32 instruction to $BB.
2156  BuildMI(BB, DL, TII->get(Mips::BPOSGE32)).addMBB(TBB);
2157
2158  // Fill $FBB.
2159  unsigned VR2 = RegInfo.createVirtualRegister(RC);
2160  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), VR2)
2161    .addReg(Mips::ZERO).addImm(0);
2162  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
2163
2164  // Fill $TBB.
2165  unsigned VR1 = RegInfo.createVirtualRegister(RC);
2166  BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), VR1)
2167    .addReg(Mips::ZERO).addImm(1);
2168
2169  // Insert phi function to $Sink.
2170  BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
2171          MI->getOperand(0).getReg())
2172    .addReg(VR2).addMBB(FBB).addReg(VR1).addMBB(TBB);
2173
2174  MI->eraseFromParent();   // The pseudo instruction is gone now.
2175  return Sink;
2176}
2177
2178MachineBasicBlock *MipsSETargetLowering::
2179emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
2180                     unsigned BranchOp) const {
2181  // $bb:
2182  //  vany_nonzero $rd, $ws
2183  //  =>
2184  // $bb:
2185  //  bnz.b $ws, $tbb
2186  //  b $fbb
2187  // $fbb:
2188  //  li $rd1, 0
2189  //  b $sink
2190  // $tbb:
2191  //  li $rd2, 1
2192  // $sink:
2193  //  $rd = phi($rd1, $fbb, $rd2, $tbb)
2194
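  // BranchOp is the actual MSA branch instruction to emit; apart from that,
  // the expansion is the same branch-and-phi diamond as in emitBPOSGE32.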
2195  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2196  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2197  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
2198  DebugLoc DL = MI->getDebugLoc();
2199  const BasicBlock *LLVM_BB = BB->getBasicBlock();
2200  MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
2201  MachineFunction *F = BB->getParent();
2202  MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
2203  MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
2204  MachineBasicBlock *Sink  = F->CreateMachineBasicBlock(LLVM_BB);
2205  F->insert(It, FBB);
2206  F->insert(It, TBB);
2207  F->insert(It, Sink);
2208
2209  // Transfer the remainder of BB and its successor edges to Sink.
2210  Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
2211               BB->end());
2212  Sink->transferSuccessorsAndUpdatePHIs(BB);
2213
2214  // Add successors.
2215  BB->addSuccessor(FBB);
2216  BB->addSuccessor(TBB);
2217  FBB->addSuccessor(Sink);
2218  TBB->addSuccessor(Sink);
2219
2220  // Insert the real branch instruction (BranchOp) into $BB.
2221  BuildMI(BB, DL, TII->get(BranchOp))
2222    .addReg(MI->getOperand(1).getReg())
2223    .addMBB(TBB);
2224
2225  // Fill $FBB.
2226  unsigned RD1 = RegInfo.createVirtualRegister(RC);
2227  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), RD1)
2228    .addReg(Mips::ZERO).addImm(0);
2229  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
2230
2231  // Fill $TBB.
2232  unsigned RD2 = RegInfo.createVirtualRegister(RC);
2233  BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), RD2)
2234    .addReg(Mips::ZERO).addImm(1);
2235
2236  // Insert phi function to $Sink.
2237  BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
2238          MI->getOperand(0).getReg())
2239    .addReg(RD1).addMBB(FBB).addReg(RD2).addMBB(TBB);
2240
2241  MI->eraseFromParent();   // The pseudo instruction is gone now.
2242  return Sink;
2243}
2244