MipsSEISelLowering.cpp revision b4691b495d867a863aa12de57d45bc6a93e4df78
1//===-- MipsSEISelLowering.cpp - MipsSE DAG Lowering Interface --*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Subclass of MipsTargetLowering specialized for mips32/64.
11//
12//===----------------------------------------------------------------------===//
13#include "MipsSEISelLowering.h"
14#include "MipsRegisterInfo.h"
15#include "MipsTargetMachine.h"
16#include "llvm/CodeGen/MachineInstrBuilder.h"
17#include "llvm/CodeGen/MachineRegisterInfo.h"
18#include "llvm/IR/Intrinsics.h"
19#include "llvm/Support/CommandLine.h"
20#include "llvm/Target/TargetInstrInfo.h"
21
22using namespace llvm;
23
24static cl::opt<bool>
25EnableMipsTailCalls("enable-mips-tail-calls", cl::Hidden,
26                    cl::desc("MIPS: Enable tail calls."), cl::init(false));
27
28static cl::opt<bool> NoDPLoadStore("mno-ldc1-sdc1", cl::init(false),
29                                   cl::desc("Expand double precision loads and "
30                                            "stores to their single precision "
31                                            "counterparts"));
32
33MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
34  : MipsTargetLowering(TM) {
35  // Set up the register classes
36
37  clearRegisterClasses();
38
39  addRegisterClass(MVT::i32, &Mips::GPR32RegClass);
40
41  if (HasMips64)
42    addRegisterClass(MVT::i64, &Mips::GPR64RegClass);
43
44  if (Subtarget->hasDSP() || Subtarget->hasMSA()) {
45    // Expand all truncating stores and extending loads.
46    unsigned FirstVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
47    unsigned LastVT = (unsigned)MVT::LAST_VECTOR_VALUETYPE;
48
49    for (unsigned VT0 = FirstVT; VT0 <= LastVT; ++VT0) {
50      for (unsigned VT1 = FirstVT; VT1 <= LastVT; ++VT1)
51        setTruncStoreAction((MVT::SimpleValueType)VT0,
52                            (MVT::SimpleValueType)VT1, Expand);
53
54      setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
55      setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
56      setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT0, Expand);
57    }
58  }
59
60  if (Subtarget->hasDSP()) {
61    MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};
62
63    for (unsigned i = 0; i < array_lengthof(VecTys); ++i) {
64      addRegisterClass(VecTys[i], &Mips::DSPRRegClass);
65
66      // Expand all builtin opcodes.
67      for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
68        setOperationAction(Opc, VecTys[i], Expand);
69
70      setOperationAction(ISD::ADD, VecTys[i], Legal);
71      setOperationAction(ISD::SUB, VecTys[i], Legal);
72      setOperationAction(ISD::LOAD, VecTys[i], Legal);
73      setOperationAction(ISD::STORE, VecTys[i], Legal);
74      setOperationAction(ISD::BITCAST, VecTys[i], Legal);
75    }
76
77    setTargetDAGCombine(ISD::SHL);
78    setTargetDAGCombine(ISD::SRA);
79    setTargetDAGCombine(ISD::SRL);
80    setTargetDAGCombine(ISD::SETCC);
81    setTargetDAGCombine(ISD::VSELECT);
82  }
83
84  if (Subtarget->hasDSPR2())
85    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
86
87  if (Subtarget->hasMSA()) {
88    addMSAIntType(MVT::v16i8, &Mips::MSA128BRegClass);
89    addMSAIntType(MVT::v8i16, &Mips::MSA128HRegClass);
90    addMSAIntType(MVT::v4i32, &Mips::MSA128WRegClass);
91    addMSAIntType(MVT::v2i64, &Mips::MSA128DRegClass);
92    addMSAFloatType(MVT::v8f16, &Mips::MSA128HRegClass);
93    addMSAFloatType(MVT::v4f32, &Mips::MSA128WRegClass);
94    addMSAFloatType(MVT::v2f64, &Mips::MSA128DRegClass);
95
96    setTargetDAGCombine(ISD::AND);
97    setTargetDAGCombine(ISD::SRA);
98    setTargetDAGCombine(ISD::VSELECT);
99    setTargetDAGCombine(ISD::XOR);
100  }
101
102  if (!Subtarget->mipsSEUsesSoftFloat()) {
103    addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
104
105    // When dealing with single precision only, f64 is handled via libcalls.
106    if (!Subtarget->isSingleFloat()) {
107      if (Subtarget->isFP64bit())
108        addRegisterClass(MVT::f64, &Mips::FGR64RegClass);
109      else
110        addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);
111    }
112  }
113
114  setOperationAction(ISD::SMUL_LOHI,          MVT::i32, Custom);
115  setOperationAction(ISD::UMUL_LOHI,          MVT::i32, Custom);
116  setOperationAction(ISD::MULHS,              MVT::i32, Custom);
117  setOperationAction(ISD::MULHU,              MVT::i32, Custom);
118
119  if (HasMips64) {
120    setOperationAction(ISD::MULHS,            MVT::i64, Custom);
121    setOperationAction(ISD::MULHU,            MVT::i64, Custom);
122    setOperationAction(ISD::MUL,              MVT::i64, Custom);
123  }
124
125  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
126  setOperationAction(ISD::INTRINSIC_W_CHAIN,  MVT::i64, Custom);
127
128  setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
129  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
130  setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
131  setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
132  setOperationAction(ISD::ATOMIC_FENCE,       MVT::Other, Custom);
133  setOperationAction(ISD::LOAD,               MVT::i32, Custom);
134  setOperationAction(ISD::STORE,              MVT::i32, Custom);
135
136  setTargetDAGCombine(ISD::ADDE);
137  setTargetDAGCombine(ISD::SUBE);
138  setTargetDAGCombine(ISD::MUL);
139
140  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
141  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
142  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
143
144  if (NoDPLoadStore) {
145    setOperationAction(ISD::LOAD, MVT::f64, Custom);
146    setOperationAction(ISD::STORE, MVT::f64, Custom);
147  }
148
149  computeRegisterProperties();
150}
151
152const MipsTargetLowering *
153llvm::createMipsSETargetLowering(MipsTargetMachine &TM) {
154  return new MipsSETargetLowering(TM);
155}
156
157// Enable MSA support for the given integer type and register class.
158void MipsSETargetLowering::
159addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
160  addRegisterClass(Ty, RC);
161
162  // Expand all builtin opcodes.
163  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
164    setOperationAction(Opc, Ty, Expand);
165
166  setOperationAction(ISD::BITCAST, Ty, Legal);
167  setOperationAction(ISD::LOAD, Ty, Legal);
168  setOperationAction(ISD::STORE, Ty, Legal);
169  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Custom);
170  setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
171  setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);
172
173  setOperationAction(ISD::ADD, Ty, Legal);
174  setOperationAction(ISD::AND, Ty, Legal);
175  setOperationAction(ISD::CTLZ, Ty, Legal);
176  setOperationAction(ISD::CTPOP, Ty, Legal);
177  setOperationAction(ISD::MUL, Ty, Legal);
178  setOperationAction(ISD::OR, Ty, Legal);
179  setOperationAction(ISD::SDIV, Ty, Legal);
180  setOperationAction(ISD::SHL, Ty, Legal);
181  setOperationAction(ISD::SRA, Ty, Legal);
182  setOperationAction(ISD::SRL, Ty, Legal);
183  setOperationAction(ISD::SUB, Ty, Legal);
184  setOperationAction(ISD::UDIV, Ty, Legal);
185  setOperationAction(ISD::VECTOR_SHUFFLE, Ty, Custom);
186  setOperationAction(ISD::VSELECT, Ty, Legal);
187  setOperationAction(ISD::XOR, Ty, Legal);
188
189  setOperationAction(ISD::SETCC, Ty, Legal);
190  setCondCodeAction(ISD::SETNE, Ty, Expand);
191  setCondCodeAction(ISD::SETGE, Ty, Expand);
192  setCondCodeAction(ISD::SETGT, Ty, Expand);
193  setCondCodeAction(ISD::SETUGE, Ty, Expand);
194  setCondCodeAction(ISD::SETUGT, Ty, Expand);
195}
196
197// Enable MSA support for the given floating-point type and register class.
198void MipsSETargetLowering::
199addMSAFloatType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
200  addRegisterClass(Ty, RC);
201
202  // Expand all builtin opcodes.
203  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
204    setOperationAction(Opc, Ty, Expand);
205
206  setOperationAction(ISD::LOAD, Ty, Legal);
207  setOperationAction(ISD::STORE, Ty, Legal);
208  setOperationAction(ISD::BITCAST, Ty, Legal);
209  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Legal);
210
211  if (Ty != MVT::v8f16) {
212    setOperationAction(ISD::FABS,  Ty, Legal);
213    setOperationAction(ISD::FADD,  Ty, Legal);
214    setOperationAction(ISD::FDIV,  Ty, Legal);
215    setOperationAction(ISD::FLOG2, Ty, Legal);
216    setOperationAction(ISD::FMUL,  Ty, Legal);
217    setOperationAction(ISD::FRINT, Ty, Legal);
218    setOperationAction(ISD::FSQRT, Ty, Legal);
219    setOperationAction(ISD::FSUB,  Ty, Legal);
220    setOperationAction(ISD::VSELECT, Ty, Legal);
221
222    setOperationAction(ISD::SETCC, Ty, Legal);
223    setCondCodeAction(ISD::SETOGE, Ty, Expand);
224    setCondCodeAction(ISD::SETOGT, Ty, Expand);
225    setCondCodeAction(ISD::SETUGE, Ty, Expand);
226    setCondCodeAction(ISD::SETUGT, Ty, Expand);
227    setCondCodeAction(ISD::SETGE,  Ty, Expand);
228    setCondCodeAction(ISD::SETGT,  Ty, Expand);
229  }
230}
231
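// Unaligned accesses are permitted (and reported as fast) for i32 and i64;
// every other type is rejected.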
232bool
233MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
234  MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
235
236  switch (SVT) {
237  case MVT::i64:
238  case MVT::i32:
239    if (Fast)
240      *Fast = true;
241    return true;
242  default:
243    return false;
244  }
245}
246
247SDValue MipsSETargetLowering::LowerOperation(SDValue Op,
248                                             SelectionDAG &DAG) const {
249  switch(Op.getOpcode()) {
250  case ISD::LOAD:  return lowerLOAD(Op, DAG);
251  case ISD::STORE: return lowerSTORE(Op, DAG);
252  case ISD::SMUL_LOHI: return lowerMulDiv(Op, MipsISD::Mult, true, true, DAG);
253  case ISD::UMUL_LOHI: return lowerMulDiv(Op, MipsISD::Multu, true, true, DAG);
254  case ISD::MULHS:     return lowerMulDiv(Op, MipsISD::Mult, false, true, DAG);
255  case ISD::MULHU:     return lowerMulDiv(Op, MipsISD::Multu, false, true, DAG);
256  case ISD::MUL:       return lowerMulDiv(Op, MipsISD::Mult, true, false, DAG);
257  case ISD::SDIVREM:   return lowerMulDiv(Op, MipsISD::DivRem, true, true, DAG);
258  case ISD::UDIVREM:   return lowerMulDiv(Op, MipsISD::DivRemU, true, true,
259                                          DAG);
260  case ISD::INTRINSIC_WO_CHAIN: return lowerINTRINSIC_WO_CHAIN(Op, DAG);
261  case ISD::INTRINSIC_W_CHAIN:  return lowerINTRINSIC_W_CHAIN(Op, DAG);
262  case ISD::INTRINSIC_VOID:     return lowerINTRINSIC_VOID(Op, DAG);
263  case ISD::EXTRACT_VECTOR_ELT: return lowerEXTRACT_VECTOR_ELT(Op, DAG);
264  case ISD::BUILD_VECTOR:       return lowerBUILD_VECTOR(Op, DAG);
265  case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, DAG);
266  }
267
268  return MipsTargetLowering::LowerOperation(Op, DAG);
269}
270
271// selectMADD -
272// Transforms a subgraph in CurDAG if the following pattern is found:
273//  (addc multLo, Lo0), (adde multHi, Hi0),
274// where,
275//  multHi/Lo: product of multiplication
276//  Lo0: initial value of Lo register
277//  Hi0: initial value of Hi register
278// Return true if pattern matching was successful.
279static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) {
280  // ADDENode's second operand must be a flag output of an ADDC node in order
281  // for the matching to be successful.
282  SDNode *ADDCNode = ADDENode->getOperand(2).getNode();
283
284  if (ADDCNode->getOpcode() != ISD::ADDC)
285    return false;
286
287  SDValue MultHi = ADDENode->getOperand(0);
288  SDValue MultLo = ADDCNode->getOperand(0);
289  SDNode *MultNode = MultHi.getNode();
290  unsigned MultOpc = MultHi.getOpcode();
291
292  // MultHi and MultLo must be generated by the same node,
293  if (MultLo.getNode() != MultNode)
294    return false;
295
296  // and it must be a multiplication.
297  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
298    return false;
299
300  // MultLo and MultHi must be the first and second output of MultNode
301  // respectively.
302  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
303    return false;
304
305  // Transform this to a MADD only if ADDENode and ADDCNode are the only users
306  // of the values of MultNode, in which case MultNode will be removed in later
307  // phases.
308  // If MultNode has any users other than ADDENode and ADDCNode, this
309  // function returns without performing the transformation, so MultNode is
310  // selected as a single MULT instruction rather than a pair of MULT and
311  // MADD instructions.
312  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
313    return false;
314
315  SDLoc DL(ADDENode);
316
317  // Initialize accumulator.
318  SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
319                                  ADDCNode->getOperand(1),
320                                  ADDENode->getOperand(1));
321
322  // create MipsMAdd(u) node
323  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MAddu : MipsISD::MAdd;
324
325  SDValue MAdd = CurDAG->getNode(MultOpc, DL, MVT::Untyped,
326                                 MultNode->getOperand(0),// Factor 0
327                                 MultNode->getOperand(1),// Factor 1
328                                 ACCIn);
329
330  // replace uses of adde and addc here
331  if (!SDValue(ADDCNode, 0).use_empty()) {
332    SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32);
333    SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd,
334                                    LoIdx);
335    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), LoOut);
336  }
337  if (!SDValue(ADDENode, 0).use_empty()) {
338    SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32);
339    SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd,
340                                    HiIdx);
341    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), HiOut);
342  }
343
344  return true;
345}
346
347// selectMSUB -
348// Transforms a subgraph in CurDAG if the following pattern is found:
349//  (addc Lo0, multLo), (sube Hi0, multHi),
350// where,
351//  multHi/Lo: product of multiplication
352//  Lo0: initial value of Lo register
353//  Hi0: initial value of Hi register
354// Return true if pattern matching was successful.
355static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) {
356  // SUBENode's second operand must be a flag output of a SUBC node in order
357  // for the matching to be successful.
358  SDNode *SUBCNode = SUBENode->getOperand(2).getNode();
359
360  if (SUBCNode->getOpcode() != ISD::SUBC)
361    return false;
362
363  SDValue MultHi = SUBENode->getOperand(1);
364  SDValue MultLo = SUBCNode->getOperand(1);
365  SDNode *MultNode = MultHi.getNode();
366  unsigned MultOpc = MultHi.getOpcode();
367
368  // MultHi and MultLo must be generated by the same node,
369  if (MultLo.getNode() != MultNode)
370    return false;
371
372  // and it must be a multiplication.
373  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
374    return false;
375
376  // MultLo and MultHi must be the first and second output of MultNode
377  // respectively.
378  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
379    return false;
380
381  // Transform this to a MSUB only if SUBENode and SUBCNode are the only users
382  // of the values of MultNode, in which case MultNode will be removed in later
383  // phases.
384  // If MultNode has any users other than SUBENode and SUBCNode, this
385  // function returns without performing the transformation, so MultNode is
386  // selected as a single MULT instruction rather than a pair of MULT and
387  // MSUB instructions.
388  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
389    return false;
390
391  SDLoc DL(SUBENode);
392
393  // Initialize accumulator.
394  SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
395                                  SUBCNode->getOperand(0),
396                                  SUBENode->getOperand(0));
397
398  // create MipsMSub(u) node
399  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MSubu : MipsISD::MSub;
400
401  SDValue MSub = CurDAG->getNode(MultOpc, DL, MVT::Untyped,
402                                 MultNode->getOperand(0),// Factor 0
403                                 MultNode->getOperand(1),// Factor 1
404                                 ACCIn);
405
406  // replace uses of sube and subc here
407  if (!SDValue(SUBCNode, 0).use_empty()) {
408    SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32);
409    SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub,
410                                    LoIdx);
411    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), LoOut);
412  }
413  if (!SDValue(SUBENode, 0).use_empty()) {
414    SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32);
415    SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub,
416                                    HiIdx);
417    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), HiOut);
418  }
419
420  return true;
421}
422
423static SDValue performADDECombine(SDNode *N, SelectionDAG &DAG,
424                                  TargetLowering::DAGCombinerInfo &DCI,
425                                  const MipsSubtarget *Subtarget) {
426  if (DCI.isBeforeLegalize())
427    return SDValue();
428
429  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
430      selectMADD(N, &DAG))
431    return SDValue(N, 0);
432
433  return SDValue();
434}
435
436// Fold zero extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT
437//
438// Performs the following transformations:
439// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to zero extension if its
440//   sign/zero-extension is completely overwritten by the new one performed by
441//   the ISD::AND.
442// - Removes redundant zero extensions performed by an ISD::AND.
443static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
444                                 TargetLowering::DAGCombinerInfo &DCI,
445                                 const MipsSubtarget *Subtarget) {
446  if (!Subtarget->hasMSA())
447    return SDValue();
448
449  SDValue Op0 = N->getOperand(0);
450  SDValue Op1 = N->getOperand(1);
451  unsigned Op0Opcode = Op0->getOpcode();
452
453  // (and (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d)
454  // where $d + 1 == 2^n and n == 32
455  // or    $d + 1 == 2^n and n <= 32 and ZExt
456  // -> (MipsVExtractZExt $a, $b, $c)
457  if (Op0Opcode == MipsISD::VEXTRACT_SEXT_ELT ||
458      Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT) {
459    ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(Op1);
460
461    if (!Mask)
462      return SDValue();
463
464    int32_t Log2IfPositive = (Mask->getAPIntValue() + 1).exactLogBase2();
465
466    if (Log2IfPositive <= 0)
467      return SDValue(); // Mask+1 is not a power of 2
468
469    SDValue Op0Op2 = Op0->getOperand(2);
470    EVT ExtendTy = cast<VTSDNode>(Op0Op2)->getVT();
471    unsigned ExtendTySize = ExtendTy.getSizeInBits();
472    unsigned Log2 = Log2IfPositive;
473
474    if ((Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT && Log2 >= ExtendTySize) ||
475        Log2 == ExtendTySize) {
476      SDValue Ops[] = { Op0->getOperand(0), Op0->getOperand(1), Op0Op2 };
477      DAG.MorphNodeTo(Op0.getNode(), MipsISD::VEXTRACT_ZEXT_ELT,
478                      Op0->getVTList(), Ops, Op0->getNumOperands());
479      return Op0;
480    }
481  }
482
483  return SDValue();
484}
485
486static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG,
487                                  TargetLowering::DAGCombinerInfo &DCI,
488                                  const MipsSubtarget *Subtarget) {
489  if (DCI.isBeforeLegalize())
490    return SDValue();
491
492  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
493      selectMSUB(N, &DAG))
494    return SDValue(N, 0);
495
496  return SDValue();
497}
498
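// Recursively build a sequence of shifts, adds and subs that multiplies X by
// the constant C, splitting C around the nearest power of two at each step.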
499static SDValue genConstMult(SDValue X, uint64_t C, SDLoc DL, EVT VT,
500                            EVT ShiftTy, SelectionDAG &DAG) {
501  // Clear the upper (64 - VT.sizeInBits) bits.
502  C &= ((uint64_t)-1) >> (64 - VT.getSizeInBits());
503
504  // Return 0.
505  if (C == 0)
506    return DAG.getConstant(0, VT);
507
508  // Return x.
509  if (C == 1)
510    return X;
511
512  // If c is a power of 2, return (shl x, log2(c)).
513  if (isPowerOf2_64(C))
514    return DAG.getNode(ISD::SHL, DL, VT, X,
515                       DAG.getConstant(Log2_64(C), ShiftTy));
516
517  unsigned Log2Ceil = Log2_64_Ceil(C);
518  uint64_t Floor = 1LL << Log2_64(C);
519  uint64_t Ceil = Log2Ceil == 64 ? 0LL : 1LL << Log2Ceil;
520
521  // If |c - floor_c| <= |c - ceil_c|,
522  // where floor_c = pow(2, floor(log2(c))) and ceil_c = pow(2, ceil(log2(c))),
523  // return (add constMult(x, floor_c), constMult(x, c - floor_c)).
524  if (C - Floor <= Ceil - C) {
525    SDValue Op0 = genConstMult(X, Floor, DL, VT, ShiftTy, DAG);
526    SDValue Op1 = genConstMult(X, C - Floor, DL, VT, ShiftTy, DAG);
527    return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
528  }
529
530  // If |c - floor_c| > |c - ceil_c|,
531  // return (sub constMult(x, ceil_c), constMult(x, ceil_c - c)).
532  SDValue Op0 = genConstMult(X, Ceil, DL, VT, ShiftTy, DAG);
533  SDValue Op1 = genConstMult(X, Ceil - C, DL, VT, ShiftTy, DAG);
534  return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
535}
536
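// Replace a scalar multiplication by a constant with the shift/add/sub
// sequence generated by genConstMult.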
537static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG,
538                                 const TargetLowering::DAGCombinerInfo &DCI,
539                                 const MipsSETargetLowering *TL) {
540  EVT VT = N->getValueType(0);
541
542  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
543    if (!VT.isVector())
544      return genConstMult(N->getOperand(0), C->getZExtValue(), SDLoc(N),
545                          VT, TL->getScalarShiftAmountTy(VT), DAG);
546
547  return SDValue(N, 0);
548}
549
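// Fold a vector shift whose amount is a constant splat into the given DSP
// shift node, provided the splat value fits within the element width.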
550static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
551                                      SelectionDAG &DAG,
552                                      const MipsSubtarget *Subtarget) {
553  // See if this is a vector splat immediate node.
554  APInt SplatValue, SplatUndef;
555  unsigned SplatBitSize;
556  bool HasAnyUndefs;
557  unsigned EltSize = Ty.getVectorElementType().getSizeInBits();
558  BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
559
560  if (!BV ||
561      !BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
562                           EltSize, !Subtarget->isLittle()) ||
563      (SplatBitSize != EltSize) ||
564      (SplatValue.getZExtValue() >= EltSize))
565    return SDValue();
566
567  return DAG.getNode(Opc, SDLoc(N), Ty, N->getOperand(0),
568                     DAG.getConstant(SplatValue.getZExtValue(), MVT::i32));
569}
570
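// Fold constant-splat left shifts of v2i16/v4i8 vectors into
// MipsISD::SHLL_DSP.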
571static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
572                                 TargetLowering::DAGCombinerInfo &DCI,
573                                 const MipsSubtarget *Subtarget) {
574  EVT Ty = N->getValueType(0);
575
576  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
577    return SDValue();
578
579  return performDSPShiftCombine(MipsISD::SHLL_DSP, N, Ty, DAG, Subtarget);
580}
581
582// Fold sign-extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT for MSA and fold
583// constant splats into MipsISD::SHRA_DSP for DSPr2.
584//
585// Performs the following transformations:
586// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to sign extension if its
587//   sign/zero-extension is completely overwritten by the new one performed by
588//   the ISD::SRA and ISD::SHL nodes.
589// - Removes redundant sign extensions performed by an ISD::SRA and ISD::SHL
590//   sequence.
591//
592// See performDSPShiftCombine for more information about the transformation
593// used for DSPr2.
594static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
595                                 TargetLowering::DAGCombinerInfo &DCI,
596                                 const MipsSubtarget *Subtarget) {
597  EVT Ty = N->getValueType(0);
598
599  if (Subtarget->hasMSA()) {
600    SDValue Op0 = N->getOperand(0);
601    SDValue Op1 = N->getOperand(1);
602
603    // (sra (shl (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d), imm:$d)
604    // where $d + sizeof($c) == 32
605    // or    $d + sizeof($c) <= 32 and SExt
606    // -> (MipsVExtractSExt $a, $b, $c)
607    if (Op0->getOpcode() == ISD::SHL && Op1 == Op0->getOperand(1)) {
608      SDValue Op0Op0 = Op0->getOperand(0);
609      ConstantSDNode *ShAmount = dyn_cast<ConstantSDNode>(Op1);
610
611      if (!ShAmount)
612        return SDValue();
613
614      if (Op0Op0->getOpcode() != MipsISD::VEXTRACT_SEXT_ELT &&
615          Op0Op0->getOpcode() != MipsISD::VEXTRACT_ZEXT_ELT)
616        return SDValue();
617
618      EVT ExtendTy = cast<VTSDNode>(Op0Op0->getOperand(2))->getVT();
619      unsigned TotalBits = ShAmount->getZExtValue() + ExtendTy.getSizeInBits();
620
621      if (TotalBits == 32 ||
622          (Op0Op0->getOpcode() == MipsISD::VEXTRACT_SEXT_ELT &&
623           TotalBits <= 32)) {
624        SDValue Ops[] = { Op0Op0->getOperand(0), Op0Op0->getOperand(1),
625                          Op0Op0->getOperand(2) };
626        DAG.MorphNodeTo(Op0Op0.getNode(), MipsISD::VEXTRACT_SEXT_ELT,
627                        Op0Op0->getVTList(), Ops, Op0Op0->getNumOperands());
628        return Op0Op0;
629      }
630    }
631  }
632
633  if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget->hasDSPR2()))
634    return SDValue();
635
636  return performDSPShiftCombine(MipsISD::SHRA_DSP, N, Ty, DAG, Subtarget);
637}
638
639
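// Fold constant-splat logical right shifts into MipsISD::SHRL_DSP. v2i16
// requires DSPr2; v4i8 only requires DSP.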
640static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG,
641                                 TargetLowering::DAGCombinerInfo &DCI,
642                                 const MipsSubtarget *Subtarget) {
643  EVT Ty = N->getValueType(0);
644
645  if (((Ty != MVT::v2i16) || !Subtarget->hasDSPR2()) && (Ty != MVT::v4i8))
646    return SDValue();
647
648  return performDSPShiftCombine(MipsISD::SHRL_DSP, N, Ty, DAG, Subtarget);
649}
650
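// Return true if the DSP ASE can perform the comparison directly: equality
// for both types, signed orderings for v2i16 and unsigned orderings for v4i8.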
651static bool isLegalDSPCondCode(EVT Ty, ISD::CondCode CC) {
652  bool IsV216 = (Ty == MVT::v2i16);
653
654  switch (CC) {
655  case ISD::SETEQ:
656  case ISD::SETNE:  return true;
657  case ISD::SETLT:
658  case ISD::SETLE:
659  case ISD::SETGT:
660  case ISD::SETGE:  return IsV216;
661  case ISD::SETULT:
662  case ISD::SETULE:
663  case ISD::SETUGT:
664  case ISD::SETUGE: return !IsV216;
665  default:          return false;
666  }
667}
668
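// Turn v2i16/v4i8 comparisons that the DSP ASE can handle into
// MipsISD::SETCC_DSP nodes.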
669static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
670  EVT Ty = N->getValueType(0);
671
672  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
673    return SDValue();
674
675  if (!isLegalDSPCondCode(Ty, cast<CondCodeSDNode>(N->getOperand(2))->get()))
676    return SDValue();
677
678  return DAG.getNode(MipsISD::SETCC_DSP, SDLoc(N), Ty, N->getOperand(0),
679                     N->getOperand(1), N->getOperand(2));
680}
681
682static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) {
683  EVT Ty = N->getValueType(0);
684
685  if (Ty.is128BitVector() && Ty.isInteger()) {
686    // Try the following combines:
687    //   (vselect (setcc $a, $b, SETLT), $b, $a) -> (vsmax $a, $b)
688    //   (vselect (setcc $a, $b, SETLE), $b, $a) -> (vsmax $a, $b)
689    //   (vselect (setcc $a, $b, SETLT), $a, $b) -> (vsmin $a, $b)
690    //   (vselect (setcc $a, $b, SETLE), $a, $b) -> (vsmin $a, $b)
691    //   (vselect (setcc $a, $b, SETULT), $b, $a) -> (vumax $a, $b)
692    //   (vselect (setcc $a, $b, SETULE), $b, $a) -> (vumax $a, $b)
693    //   (vselect (setcc $a, $b, SETULT), $a, $b) -> (vumin $a, $b)
694    //   (vselect (setcc $a, $b, SETULE), $a, $b) -> (vumin $a, $b)
695    // SETGT/SETGE/SETUGT/SETUGE variants of these will show up initially but
696    // will be expanded to equivalent SETLT/SETLE/SETULT/SETULE versions by the
697    // legalizer.
698    SDValue Op0 = N->getOperand(0);
699
700    if (Op0->getOpcode() != ISD::SETCC)
701      return SDValue();
702
703    ISD::CondCode CondCode = cast<CondCodeSDNode>(Op0->getOperand(2))->get();
704    bool Signed;
705
706    if (CondCode == ISD::SETLT  || CondCode == ISD::SETLE)
707      Signed = true;
708    else if (CondCode == ISD::SETULT || CondCode == ISD::SETULE)
709      Signed = false;
710    else
711      return SDValue();
712
713    SDValue Op1 = N->getOperand(1);
714    SDValue Op2 = N->getOperand(2);
715    SDValue Op0Op0 = Op0->getOperand(0);
716    SDValue Op0Op1 = Op0->getOperand(1);
717
718    if (Op1 == Op0Op0 && Op2 == Op0Op1)
719      return DAG.getNode(Signed ? MipsISD::VSMIN : MipsISD::VUMIN, SDLoc(N),
720                         Ty, Op1, Op2);
721    else if (Op1 == Op0Op1 && Op2 == Op0Op0)
722      return DAG.getNode(Signed ? MipsISD::VSMAX : MipsISD::VUMAX, SDLoc(N),
723                         Ty, Op1, Op2);
724  } else if ((Ty == MVT::v2i16) || (Ty == MVT::v4i8)) {
725    SDValue SetCC = N->getOperand(0);
726
727    if (SetCC.getOpcode() != MipsISD::SETCC_DSP)
728      return SDValue();
729
730    return DAG.getNode(MipsISD::SELECT_CC_DSP, SDLoc(N), Ty,
731                       SetCC.getOperand(0), SetCC.getOperand(1),
732                       N->getOperand(1), N->getOperand(2), SetCC.getOperand(2));
733  }
734
735  return SDValue();
736}
737
738static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
739                                 const MipsSubtarget *Subtarget) {
740  EVT Ty = N->getValueType(0);
741
742  if (Subtarget->hasMSA() && Ty.is128BitVector() && Ty.isInteger()) {
743    // Try the following combines:
744    //   (xor (or $a, $b), (build_vector allones))
745    //   (xor (or $a, $b), (bitcast (build_vector allones)))
746    SDValue Op0 = N->getOperand(0);
747    SDValue Op1 = N->getOperand(1);
748    SDValue NotOp;
749
750    if (ISD::isBuildVectorAllOnes(Op0.getNode()))
751      NotOp = Op1;
752    else if (ISD::isBuildVectorAllOnes(Op1.getNode()))
753      NotOp = Op0;
754    else
755      return SDValue();
756
757    if (NotOp->getOpcode() == ISD::OR)
758      return DAG.getNode(MipsISD::VNOR, SDLoc(N), Ty, NotOp->getOperand(0),
759                         NotOp->getOperand(1));
760  }
761
762  return SDValue();
763}
764
765SDValue
766MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
767  SelectionDAG &DAG = DCI.DAG;
768  SDValue Val;
769
770  switch (N->getOpcode()) {
771  case ISD::ADDE:
772    return performADDECombine(N, DAG, DCI, Subtarget);
773  case ISD::AND:
774    Val = performANDCombine(N, DAG, DCI, Subtarget);
775    break;
776  case ISD::SUBE:
777    return performSUBECombine(N, DAG, DCI, Subtarget);
778  case ISD::MUL:
779    return performMULCombine(N, DAG, DCI, this);
780  case ISD::SHL:
781    return performSHLCombine(N, DAG, DCI, Subtarget);
782  case ISD::SRA:
783    return performSRACombine(N, DAG, DCI, Subtarget);
784  case ISD::SRL:
785    return performSRLCombine(N, DAG, DCI, Subtarget);
786  case ISD::VSELECT:
787    return performVSELECTCombine(N, DAG);
788  case ISD::XOR:
789    Val = performXORCombine(N, DAG, Subtarget);
790    break;
791  case ISD::SETCC:
792    Val = performSETCCCombine(N, DAG);
793    break;
794  }
795
796  if (Val.getNode())
797    return Val;
798
799  return MipsTargetLowering::PerformDAGCombine(N, DCI);
800}
801
802MachineBasicBlock *
803MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
804                                                  MachineBasicBlock *BB) const {
805  switch (MI->getOpcode()) {
806  default:
807    return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
808  case Mips::BPOSGE32_PSEUDO:
809    return emitBPOSGE32(MI, BB);
810  case Mips::SNZ_B_PSEUDO:
811    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_B);
812  case Mips::SNZ_H_PSEUDO:
813    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_H);
814  case Mips::SNZ_W_PSEUDO:
815    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_W);
816  case Mips::SNZ_D_PSEUDO:
817    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_D);
818  case Mips::SNZ_V_PSEUDO:
819    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_V);
820  case Mips::SZ_B_PSEUDO:
821    return emitMSACBranchPseudo(MI, BB, Mips::BZ_B);
822  case Mips::SZ_H_PSEUDO:
823    return emitMSACBranchPseudo(MI, BB, Mips::BZ_H);
824  case Mips::SZ_W_PSEUDO:
825    return emitMSACBranchPseudo(MI, BB, Mips::BZ_W);
826  case Mips::SZ_D_PSEUDO:
827    return emitMSACBranchPseudo(MI, BB, Mips::BZ_D);
828  case Mips::SZ_V_PSEUDO:
829    return emitMSACBranchPseudo(MI, BB, Mips::BZ_V);
830  case Mips::COPY_FW_PSEUDO:
831    return emitCOPY_FW(MI, BB);
832  case Mips::COPY_FD_PSEUDO:
833    return emitCOPY_FD(MI, BB);
834  }
835}
836
837bool MipsSETargetLowering::
838isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
839                                  unsigned NextStackOffset,
840                                  const MipsFunctionInfo& FI) const {
841  if (!EnableMipsTailCalls)
842    return false;
843
844  // Return false if either the callee or caller has a byval argument.
845  if (MipsCCInfo.hasByValArg() || FI.hasByvalArg())
846    return false;
847
848  // Return true if the callee's argument area is no larger than the
849  // caller's.
850  return NextStackOffset <= FI.getIncomingArgSize();
851}
852
853void MipsSETargetLowering::
854getOpndList(SmallVectorImpl<SDValue> &Ops,
855            std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
856            bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
857            CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const {
858  // T9 should contain the address of the callee function if
859  // -relocation-model=pic or it is an indirect call.
860  if (IsPICCall || !GlobalOrExternal) {
861    unsigned T9Reg = IsN64 ? Mips::T9_64 : Mips::T9;
862    RegsToPass.push_front(std::make_pair(T9Reg, Callee));
863  } else
864    Ops.push_back(Callee);
865
866  MipsTargetLowering::getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal,
867                                  InternalLinkage, CLI, Callee, Chain);
868}
869
870SDValue MipsSETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
871  LoadSDNode &Nd = *cast<LoadSDNode>(Op);
872
873  if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
874    return MipsTargetLowering::lowerLOAD(Op, DAG);
875
876  // Replace a double precision load with two i32 loads and a BuildPairF64.
877  SDLoc DL(Op);
878  SDValue Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
879  EVT PtrVT = Ptr.getValueType();
880
881  // i32 load from lower address.
882  SDValue Lo = DAG.getLoad(MVT::i32, DL, Chain, Ptr,
883                           MachinePointerInfo(), Nd.isVolatile(),
884                           Nd.isNonTemporal(), Nd.isInvariant(),
885                           Nd.getAlignment());
886
887  // i32 load from higher address.
888  Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
889  SDValue Hi = DAG.getLoad(MVT::i32, DL, Lo.getValue(1), Ptr,
890                           MachinePointerInfo(), Nd.isVolatile(),
891                           Nd.isNonTemporal(), Nd.isInvariant(),
892                           std::min(Nd.getAlignment(), 4U));
893
894  if (!Subtarget->isLittle())
895    std::swap(Lo, Hi);
896
897  SDValue BP = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
898  SDValue Ops[2] = {BP, Hi.getValue(1)};
899  return DAG.getMergeValues(Ops, 2, DL);
900}
901
902SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
903  StoreSDNode &Nd = *cast<StoreSDNode>(Op);
904
905  if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
906    return MipsTargetLowering::lowerSTORE(Op, DAG);
907
908  // Replace a double precision store with two ExtractElementF64s and i32 stores.
909  SDLoc DL(Op);
910  SDValue Val = Nd.getValue(), Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
911  EVT PtrVT = Ptr.getValueType();
912  SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
913                           Val, DAG.getConstant(0, MVT::i32));
914  SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
915                           Val, DAG.getConstant(1, MVT::i32));
916
917  if (!Subtarget->isLittle())
918    std::swap(Lo, Hi);
919
920  // i32 store to lower address.
921  Chain = DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(),
922                       Nd.isVolatile(), Nd.isNonTemporal(), Nd.getAlignment(),
923                       Nd.getTBAAInfo());
924
925  // i32 store to higher address.
926  Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
927  return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(),
928                      Nd.isVolatile(), Nd.isNonTemporal(),
929                      std::min(Nd.getAlignment(), 4U), Nd.getTBAAInfo());
930}
931
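// Lower a multiplication or division to a node that targets the LO/HI
// register pair, then extract whichever halves the original node produced.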
932SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
933                                          bool HasLo, bool HasHi,
934                                          SelectionDAG &DAG) const {
935  EVT Ty = Op.getOperand(0).getValueType();
936  SDLoc DL(Op);
937  SDValue Mult = DAG.getNode(NewOpc, DL, MVT::Untyped,
938                             Op.getOperand(0), Op.getOperand(1));
939  SDValue Lo, Hi;
940
941  if (HasLo)
942    Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult,
943                     DAG.getConstant(Mips::sub_lo, MVT::i32));
944  if (HasHi)
945    Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult,
946                     DAG.getConstant(Mips::sub_hi, MVT::i32));
947
948  if (!HasLo || !HasHi)
949    return HasLo ? Lo : Hi;
950
951  SDValue Vals[] = { Lo, Hi };
952  return DAG.getMergeValues(Vals, 2, DL);
953}
954
955
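// Split a 64-bit value into two i32 halves and insert them into the LO/HI
// accumulator.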
956static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) {
957  SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
958                             DAG.getConstant(0, MVT::i32));
959  SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
960                             DAG.getConstant(1, MVT::i32));
961  return DAG.getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, InLo, InHi);
962}
963
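// Read back the LO/HI accumulator halves and combine them into an i64.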
964static SDValue extractLOHI(SDValue Op, SDLoc DL, SelectionDAG &DAG) {
965  SDValue Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op,
966                           DAG.getConstant(Mips::sub_lo, MVT::i32));
967  SDValue Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op,
968                           DAG.getConstant(Mips::sub_hi, MVT::i32));
969  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
970}
971
972// This function expands MIPS intrinsic nodes which have 64-bit input operands
973// or output values.
974//
975// out64 = intrinsic-node in64
976// =>
977// lo = copy (extract-element (in64, 0))
978// hi = copy (extract-element (in64, 1))
979// mips-specific-node
980// v0 = copy lo
981// v1 = copy hi
982// out64 = merge-values (v0, v1)
983//
984static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
985  SDLoc DL(Op);
986  bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other;
987  SmallVector<SDValue, 3> Ops;
988  unsigned OpNo = 0;
989
990  // See if Op has a chain input.
991  if (HasChainIn)
992    Ops.push_back(Op->getOperand(OpNo++));
993
994  // The next operand is the intrinsic opcode.
995  assert(Op->getOperand(OpNo).getOpcode() == ISD::TargetConstant);
996
997  // See if the next operand has type i64.
998  SDValue Opnd = Op->getOperand(++OpNo), In64;
999
1000  if (Opnd.getValueType() == MVT::i64)
1001    In64 = initAccumulator(Opnd, DL, DAG);
1002  else
1003    Ops.push_back(Opnd);
1004
1005  // Push the remaining operands.
1006  for (++OpNo ; OpNo < Op->getNumOperands(); ++OpNo)
1007    Ops.push_back(Op->getOperand(OpNo));
1008
1009  // Add In64 to the end of the list.
1010  if (In64.getNode())
1011    Ops.push_back(In64);
1012
1013  // Scan output.
1014  SmallVector<EVT, 2> ResTys;
1015
1016  for (SDNode::value_iterator I = Op->value_begin(), E = Op->value_end();
1017       I != E; ++I)
1018    ResTys.push_back((*I == MVT::i64) ? MVT::Untyped : *I);
1019
1020  // Create node.
1021  SDValue Val = DAG.getNode(Opc, DL, ResTys, &Ops[0], Ops.size());
1022  SDValue Out = (ResTys[0] == MVT::Untyped) ? extractLOHI(Val, DL, DAG) : Val;
1023
1024  if (!HasChainIn)
1025    return Out;
1026
1027  assert(Val->getValueType(1) == MVT::Other);
1028  SDValue Vals[] = { Out, SDValue(Val.getNode(), 1) };
1029  return DAG.getMergeValues(Vals, 2, DL);
1030}
1031
1032// Lower an MSA copy intrinsic into the specified SelectionDAG node
1033static SDValue lowerMSACopyIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1034  SDLoc DL(Op);
1035  SDValue Vec = Op->getOperand(1);
1036  SDValue Idx = Op->getOperand(2);
1037  EVT ResTy = Op->getValueType(0);
1038  EVT EltTy = Vec->getValueType(0).getVectorElementType();
1039
1040  SDValue Result = DAG.getNode(Opc, DL, ResTy, Vec, Idx,
1041                               DAG.getValueType(EltTy));
1042
1043  return Result;
1044}
1045
1046// Lower an MSA insert intrinsic into the specified SelectionDAG node
1047static SDValue lowerMSAInsertIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1048  SDLoc DL(Op);
1049  SDValue Op0 = Op->getOperand(1);
1050  SDValue Op1 = Op->getOperand(2);
1051  SDValue Op2 = Op->getOperand(3);
1052  EVT ResTy = Op->getValueType(0);
1053
1054  SDValue Result = DAG.getNode(Opc, DL, ResTy, Op0, Op2, Op1);
1055
1056  return Result;
1057}
1058
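// Build a BUILD_VECTOR that splats the given immediate operand. v2i64 splats
// are built as v4i32 vectors from the immediate and its sign bits, then
// bitcast to the requested type.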
1059static SDValue
1060lowerMSASplatImm(SDLoc DL, EVT ResTy, SDValue ImmOp, SelectionDAG &DAG) {
1061  EVT ViaVecTy = ResTy;
1062  SmallVector<SDValue, 16> Ops;
1063  SDValue ImmHiOp;
1064
1065  if (ViaVecTy == MVT::v2i64) {
1066    ImmHiOp = DAG.getNode(ISD::SRA, DL, MVT::i32, ImmOp,
1067                          DAG.getConstant(31, MVT::i32));
1068    for (unsigned i = 0; i < ViaVecTy.getVectorNumElements(); ++i) {
1069      Ops.push_back(ImmHiOp);
1070      Ops.push_back(ImmOp);
1071    }
1072    ViaVecTy = MVT::v4i32;
1073  } else {
1074    for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i)
1075      Ops.push_back(ImmOp);
1076  }
1077
1078  SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, &Ops[0],
1079                               Ops.size());
1080
1081  if (ResTy != ViaVecTy)
1082    Result = DAG.getNode(ISD::BITCAST, DL, ResTy, Result);
1083
1084  return Result;
1085}
1086
1087static SDValue
1088lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG) {
1089  return lowerMSASplatImm(SDLoc(Op), Op->getValueType(0),
1090                          Op->getOperand(ImmOp), DAG);
1091}
1092
1093SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
1094                                                      SelectionDAG &DAG) const {
1095  SDLoc DL(Op);
1096
1097  switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) {
1098  default:
1099    return SDValue();
1100  case Intrinsic::mips_shilo:
1101    return lowerDSPIntr(Op, DAG, MipsISD::SHILO);
1102  case Intrinsic::mips_dpau_h_qbl:
1103    return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBL);
1104  case Intrinsic::mips_dpau_h_qbr:
1105    return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBR);
1106  case Intrinsic::mips_dpsu_h_qbl:
1107    return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBL);
1108  case Intrinsic::mips_dpsu_h_qbr:
1109    return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBR);
1110  case Intrinsic::mips_dpa_w_ph:
1111    return lowerDSPIntr(Op, DAG, MipsISD::DPA_W_PH);
1112  case Intrinsic::mips_dps_w_ph:
1113    return lowerDSPIntr(Op, DAG, MipsISD::DPS_W_PH);
1114  case Intrinsic::mips_dpax_w_ph:
1115    return lowerDSPIntr(Op, DAG, MipsISD::DPAX_W_PH);
1116  case Intrinsic::mips_dpsx_w_ph:
1117    return lowerDSPIntr(Op, DAG, MipsISD::DPSX_W_PH);
1118  case Intrinsic::mips_mulsa_w_ph:
1119    return lowerDSPIntr(Op, DAG, MipsISD::MULSA_W_PH);
1120  case Intrinsic::mips_mult:
1121    return lowerDSPIntr(Op, DAG, MipsISD::Mult);
1122  case Intrinsic::mips_multu:
1123    return lowerDSPIntr(Op, DAG, MipsISD::Multu);
1124  case Intrinsic::mips_madd:
1125    return lowerDSPIntr(Op, DAG, MipsISD::MAdd);
1126  case Intrinsic::mips_maddu:
1127    return lowerDSPIntr(Op, DAG, MipsISD::MAddu);
1128  case Intrinsic::mips_msub:
1129    return lowerDSPIntr(Op, DAG, MipsISD::MSub);
1130  case Intrinsic::mips_msubu:
1131    return lowerDSPIntr(Op, DAG, MipsISD::MSubu);
1132  case Intrinsic::mips_addv_b:
1133  case Intrinsic::mips_addv_h:
1134  case Intrinsic::mips_addv_w:
1135  case Intrinsic::mips_addv_d:
1136    return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1),
1137                       Op->getOperand(2));
1138  case Intrinsic::mips_addvi_b:
1139  case Intrinsic::mips_addvi_h:
1140  case Intrinsic::mips_addvi_w:
1141  case Intrinsic::mips_addvi_d:
1142    return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1),
1143                       lowerMSASplatImm(Op, 2, DAG));
1144  case Intrinsic::mips_and_v:
1145    return DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1),
1146                       Op->getOperand(2));
1147  case Intrinsic::mips_andi_b:
1148    return DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1),
1149                       lowerMSASplatImm(Op, 2, DAG));
1150  case Intrinsic::mips_bnz_b:
1151  case Intrinsic::mips_bnz_h:
1152  case Intrinsic::mips_bnz_w:
1153  case Intrinsic::mips_bnz_d:
1154    return DAG.getNode(MipsISD::VALL_NONZERO, DL, Op->getValueType(0),
1155                       Op->getOperand(1));
1156  case Intrinsic::mips_bnz_v:
1157    return DAG.getNode(MipsISD::VANY_NONZERO, DL, Op->getValueType(0),
1158                       Op->getOperand(1));
1159  case Intrinsic::mips_bsel_v:
1160    return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
1161                       Op->getOperand(1), Op->getOperand(2),
1162                       Op->getOperand(3));
1163  case Intrinsic::mips_bseli_b:
1164    return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
1165                       Op->getOperand(1), Op->getOperand(2),
1166                       lowerMSASplatImm(Op, 3, DAG));
1167  case Intrinsic::mips_bz_b:
1168  case Intrinsic::mips_bz_h:
1169  case Intrinsic::mips_bz_w:
1170  case Intrinsic::mips_bz_d:
1171    return DAG.getNode(MipsISD::VALL_ZERO, DL, Op->getValueType(0),
1172                       Op->getOperand(1));
1173  case Intrinsic::mips_bz_v:
1174    return DAG.getNode(MipsISD::VANY_ZERO, DL, Op->getValueType(0),
1175                       Op->getOperand(1));
1176  case Intrinsic::mips_ceq_b:
1177  case Intrinsic::mips_ceq_h:
1178  case Intrinsic::mips_ceq_w:
1179  case Intrinsic::mips_ceq_d:
1180    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1181                        Op->getOperand(2), ISD::SETEQ);
1182  case Intrinsic::mips_ceqi_b:
1183  case Intrinsic::mips_ceqi_h:
1184  case Intrinsic::mips_ceqi_w:
1185  case Intrinsic::mips_ceqi_d:
1186    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1187                        lowerMSASplatImm(Op, 2, DAG), ISD::SETEQ);
1188  case Intrinsic::mips_cle_s_b:
1189  case Intrinsic::mips_cle_s_h:
1190  case Intrinsic::mips_cle_s_w:
1191  case Intrinsic::mips_cle_s_d:
1192    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1193                        Op->getOperand(2), ISD::SETLE);
1194  case Intrinsic::mips_clei_s_b:
1195  case Intrinsic::mips_clei_s_h:
1196  case Intrinsic::mips_clei_s_w:
1197  case Intrinsic::mips_clei_s_d:
1198    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1199                        lowerMSASplatImm(Op, 2, DAG), ISD::SETLE);
1200  case Intrinsic::mips_cle_u_b:
1201  case Intrinsic::mips_cle_u_h:
1202  case Intrinsic::mips_cle_u_w:
1203  case Intrinsic::mips_cle_u_d:
1204    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1205                        Op->getOperand(2), ISD::SETULE);
1206  case Intrinsic::mips_clei_u_b:
1207  case Intrinsic::mips_clei_u_h:
1208  case Intrinsic::mips_clei_u_w:
1209  case Intrinsic::mips_clei_u_d:
1210    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1211                        lowerMSASplatImm(Op, 2, DAG), ISD::SETULE);
1212  case Intrinsic::mips_clt_s_b:
1213  case Intrinsic::mips_clt_s_h:
1214  case Intrinsic::mips_clt_s_w:
1215  case Intrinsic::mips_clt_s_d:
1216    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1217                        Op->getOperand(2), ISD::SETLT);
1218  case Intrinsic::mips_clti_s_b:
1219  case Intrinsic::mips_clti_s_h:
1220  case Intrinsic::mips_clti_s_w:
1221  case Intrinsic::mips_clti_s_d:
1222    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1223                        lowerMSASplatImm(Op, 2, DAG), ISD::SETLT);
1224  case Intrinsic::mips_clt_u_b:
1225  case Intrinsic::mips_clt_u_h:
1226  case Intrinsic::mips_clt_u_w:
1227  case Intrinsic::mips_clt_u_d:
1228    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1229                        Op->getOperand(2), ISD::SETULT);
1230  case Intrinsic::mips_clti_u_b:
1231  case Intrinsic::mips_clti_u_h:
1232  case Intrinsic::mips_clti_u_w:
1233  case Intrinsic::mips_clti_u_d:
1234    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1235                        lowerMSASplatImm(Op, 2, DAG), ISD::SETULT);
1236  case Intrinsic::mips_copy_s_b:
1237  case Intrinsic::mips_copy_s_h:
1238  case Intrinsic::mips_copy_s_w:
1239    return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT);
1240  case Intrinsic::mips_copy_u_b:
1241  case Intrinsic::mips_copy_u_h:
1242  case Intrinsic::mips_copy_u_w:
1243    return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT);
1244  case Intrinsic::mips_div_s_b:
1245  case Intrinsic::mips_div_s_h:
1246  case Intrinsic::mips_div_s_w:
1247  case Intrinsic::mips_div_s_d:
1248    return DAG.getNode(ISD::SDIV, DL, Op->getValueType(0), Op->getOperand(1),
1249                       Op->getOperand(2));
1250  case Intrinsic::mips_div_u_b:
1251  case Intrinsic::mips_div_u_h:
1252  case Intrinsic::mips_div_u_w:
1253  case Intrinsic::mips_div_u_d:
1254    return DAG.getNode(ISD::UDIV, DL, Op->getValueType(0), Op->getOperand(1),
1255                       Op->getOperand(2));
1256  case Intrinsic::mips_fadd_w:
1257  case Intrinsic::mips_fadd_d:
1258    return DAG.getNode(ISD::FADD, DL, Op->getValueType(0), Op->getOperand(1),
1259                       Op->getOperand(2));
1260  // Don't lower mips_fcaf_[wd] since LLVM folds SETFALSE condcodes away
1261  case Intrinsic::mips_fceq_w:
1262  case Intrinsic::mips_fceq_d:
1263    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1264                        Op->getOperand(2), ISD::SETOEQ);
1265  case Intrinsic::mips_fcle_w:
1266  case Intrinsic::mips_fcle_d:
1267    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1268                        Op->getOperand(2), ISD::SETOLE);
1269  case Intrinsic::mips_fclt_w:
1270  case Intrinsic::mips_fclt_d:
1271    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1272                        Op->getOperand(2), ISD::SETOLT);
1273  case Intrinsic::mips_fcne_w:
1274  case Intrinsic::mips_fcne_d:
1275    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1276                        Op->getOperand(2), ISD::SETONE);
1277  case Intrinsic::mips_fcor_w:
1278  case Intrinsic::mips_fcor_d:
1279    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1280                        Op->getOperand(2), ISD::SETO);
1281  case Intrinsic::mips_fcueq_w:
1282  case Intrinsic::mips_fcueq_d:
1283    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1284                        Op->getOperand(2), ISD::SETUEQ);
1285  case Intrinsic::mips_fcule_w:
1286  case Intrinsic::mips_fcule_d:
1287    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1288                        Op->getOperand(2), ISD::SETULE);
1289  case Intrinsic::mips_fcult_w:
1290  case Intrinsic::mips_fcult_d:
1291    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1292                        Op->getOperand(2), ISD::SETULT);
1293  case Intrinsic::mips_fcun_w:
1294  case Intrinsic::mips_fcun_d:
1295    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1296                        Op->getOperand(2), ISD::SETUO);
1297  case Intrinsic::mips_fcune_w:
1298  case Intrinsic::mips_fcune_d:
1299    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1300                        Op->getOperand(2), ISD::SETUNE);
1301  case Intrinsic::mips_fdiv_w:
1302  case Intrinsic::mips_fdiv_d:
1303    return DAG.getNode(ISD::FDIV, DL, Op->getValueType(0), Op->getOperand(1),
1304                       Op->getOperand(2));
1305  case Intrinsic::mips_fill_b:
1306  case Intrinsic::mips_fill_h:
1307  case Intrinsic::mips_fill_w: {
1308    SmallVector<SDValue, 16> Ops;
1309    EVT ResTy = Op->getValueType(0);
1310
1311    for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i)
1312      Ops.push_back(Op->getOperand(1));
1313
1314    return DAG.getNode(ISD::BUILD_VECTOR, DL, ResTy, &Ops[0],
1315                       Ops.size());
1316  }
1317  case Intrinsic::mips_flog2_w:
1318  case Intrinsic::mips_flog2_d:
1319    return DAG.getNode(ISD::FLOG2, DL, Op->getValueType(0), Op->getOperand(1));
1320  case Intrinsic::mips_fmul_w:
1321  case Intrinsic::mips_fmul_d:
1322    return DAG.getNode(ISD::FMUL, DL, Op->getValueType(0), Op->getOperand(1),
1323                       Op->getOperand(2));
1324  case Intrinsic::mips_frint_w:
1325  case Intrinsic::mips_frint_d:
1326    return DAG.getNode(ISD::FRINT, DL, Op->getValueType(0), Op->getOperand(1));
1327  case Intrinsic::mips_fsqrt_w:
1328  case Intrinsic::mips_fsqrt_d:
1329    return DAG.getNode(ISD::FSQRT, DL, Op->getValueType(0), Op->getOperand(1));
1330  case Intrinsic::mips_fsub_w:
1331  case Intrinsic::mips_fsub_d:
1332    return DAG.getNode(ISD::FSUB, DL, Op->getValueType(0), Op->getOperand(1),
1333                       Op->getOperand(2));
1334  case Intrinsic::mips_ilvev_b:
1335  case Intrinsic::mips_ilvev_h:
1336  case Intrinsic::mips_ilvev_w:
1337  case Intrinsic::mips_ilvev_d:
1338    return DAG.getNode(MipsISD::ILVEV, DL, Op->getValueType(0),
1339                       Op->getOperand(1), Op->getOperand(2));
1340  case Intrinsic::mips_ilvl_b:
1341  case Intrinsic::mips_ilvl_h:
1342  case Intrinsic::mips_ilvl_w:
1343  case Intrinsic::mips_ilvl_d:
1344    return DAG.getNode(MipsISD::ILVL, DL, Op->getValueType(0),
1345                       Op->getOperand(1), Op->getOperand(2));
1346  case Intrinsic::mips_ilvod_b:
1347  case Intrinsic::mips_ilvod_h:
1348  case Intrinsic::mips_ilvod_w:
1349  case Intrinsic::mips_ilvod_d:
1350    return DAG.getNode(MipsISD::ILVOD, DL, Op->getValueType(0),
1351                       Op->getOperand(1), Op->getOperand(2));
1352  case Intrinsic::mips_ilvr_b:
1353  case Intrinsic::mips_ilvr_h:
1354  case Intrinsic::mips_ilvr_w:
1355  case Intrinsic::mips_ilvr_d:
1356    return DAG.getNode(MipsISD::ILVR, DL, Op->getValueType(0),
1357                       Op->getOperand(1), Op->getOperand(2));
1358  case Intrinsic::mips_insert_b:
1359  case Intrinsic::mips_insert_h:
1360  case Intrinsic::mips_insert_w:
1361    return lowerMSAInsertIntr(Op, DAG, ISD::INSERT_VECTOR_ELT);
1362  case Intrinsic::mips_ldi_b:
1363  case Intrinsic::mips_ldi_h:
1364  case Intrinsic::mips_ldi_w:
1365  case Intrinsic::mips_ldi_d:
1366    return lowerMSASplatImm(Op, 1, DAG);
1367  case Intrinsic::mips_max_s_b:
1368  case Intrinsic::mips_max_s_h:
1369  case Intrinsic::mips_max_s_w:
1370  case Intrinsic::mips_max_s_d:
1371    return DAG.getNode(MipsISD::VSMAX, DL, Op->getValueType(0),
1372                       Op->getOperand(1), Op->getOperand(2));
1373  case Intrinsic::mips_max_u_b:
1374  case Intrinsic::mips_max_u_h:
1375  case Intrinsic::mips_max_u_w:
1376  case Intrinsic::mips_max_u_d:
1377    return DAG.getNode(MipsISD::VUMAX, DL, Op->getValueType(0),
1378                       Op->getOperand(1), Op->getOperand(2));
1379  case Intrinsic::mips_maxi_s_b:
1380  case Intrinsic::mips_maxi_s_h:
1381  case Intrinsic::mips_maxi_s_w:
1382  case Intrinsic::mips_maxi_s_d:
1383    return DAG.getNode(MipsISD::VSMAX, DL, Op->getValueType(0),
1384                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1385  case Intrinsic::mips_maxi_u_b:
1386  case Intrinsic::mips_maxi_u_h:
1387  case Intrinsic::mips_maxi_u_w:
1388  case Intrinsic::mips_maxi_u_d:
1389    return DAG.getNode(MipsISD::VUMAX, DL, Op->getValueType(0),
1390                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1391  case Intrinsic::mips_min_s_b:
1392  case Intrinsic::mips_min_s_h:
1393  case Intrinsic::mips_min_s_w:
1394  case Intrinsic::mips_min_s_d:
1395    return DAG.getNode(MipsISD::VSMIN, DL, Op->getValueType(0),
1396                       Op->getOperand(1), Op->getOperand(2));
1397  case Intrinsic::mips_min_u_b:
1398  case Intrinsic::mips_min_u_h:
1399  case Intrinsic::mips_min_u_w:
1400  case Intrinsic::mips_min_u_d:
1401    return DAG.getNode(MipsISD::VUMIN, DL, Op->getValueType(0),
1402                       Op->getOperand(1), Op->getOperand(2));
1403  case Intrinsic::mips_mini_s_b:
1404  case Intrinsic::mips_mini_s_h:
1405  case Intrinsic::mips_mini_s_w:
1406  case Intrinsic::mips_mini_s_d:
1407    return DAG.getNode(MipsISD::VSMIN, DL, Op->getValueType(0),
1408                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1409  case Intrinsic::mips_mini_u_b:
1410  case Intrinsic::mips_mini_u_h:
1411  case Intrinsic::mips_mini_u_w:
1412  case Intrinsic::mips_mini_u_d:
1413    return DAG.getNode(MipsISD::VUMIN, DL, Op->getValueType(0),
1414                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1415  case Intrinsic::mips_mulv_b:
1416  case Intrinsic::mips_mulv_h:
1417  case Intrinsic::mips_mulv_w:
1418  case Intrinsic::mips_mulv_d:
1419    return DAG.getNode(ISD::MUL, DL, Op->getValueType(0), Op->getOperand(1),
1420                       Op->getOperand(2));
1421  case Intrinsic::mips_nlzc_b:
1422  case Intrinsic::mips_nlzc_h:
1423  case Intrinsic::mips_nlzc_w:
1424  case Intrinsic::mips_nlzc_d:
1425    return DAG.getNode(ISD::CTLZ, DL, Op->getValueType(0), Op->getOperand(1));
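  // There is no generic ISD opcode for a vector nor, so the nor/nori
  // intrinsics below are emitted as (not (or ...)).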
1426  case Intrinsic::mips_nor_v: {
1427    SDValue Res = DAG.getNode(ISD::OR, DL, Op->getValueType(0),
1428                              Op->getOperand(1), Op->getOperand(2));
1429    return DAG.getNOT(DL, Res, Res->getValueType(0));
1430  }
1431  case Intrinsic::mips_nori_b: {
1432    SDValue Res =  DAG.getNode(ISD::OR, DL, Op->getValueType(0),
1433                               Op->getOperand(1),
1434                               lowerMSASplatImm(Op, 2, DAG));
1435    return DAG.getNOT(DL, Res, Res->getValueType(0));
1436  }
1437  case Intrinsic::mips_or_v:
1438    return DAG.getNode(ISD::OR, DL, Op->getValueType(0), Op->getOperand(1),
1439                       Op->getOperand(2));
1440  case Intrinsic::mips_ori_b:
1441    return DAG.getNode(ISD::OR, DL, Op->getValueType(0),
1442                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1443  case Intrinsic::mips_pckev_b:
1444  case Intrinsic::mips_pckev_h:
1445  case Intrinsic::mips_pckev_w:
1446  case Intrinsic::mips_pckev_d:
1447    return DAG.getNode(MipsISD::PCKEV, DL, Op->getValueType(0),
1448                       Op->getOperand(1), Op->getOperand(2));
1449  case Intrinsic::mips_pckod_b:
1450  case Intrinsic::mips_pckod_h:
1451  case Intrinsic::mips_pckod_w:
1452  case Intrinsic::mips_pckod_d:
1453    return DAG.getNode(MipsISD::PCKOD, DL, Op->getValueType(0),
1454                       Op->getOperand(1), Op->getOperand(2));
1455  case Intrinsic::mips_pcnt_b:
1456  case Intrinsic::mips_pcnt_h:
1457  case Intrinsic::mips_pcnt_w:
1458  case Intrinsic::mips_pcnt_d:
1459    return DAG.getNode(ISD::CTPOP, DL, Op->getValueType(0), Op->getOperand(1));
1460  case Intrinsic::mips_shf_b:
1461  case Intrinsic::mips_shf_h:
1462  case Intrinsic::mips_shf_w:
1463    return DAG.getNode(MipsISD::SHF, DL, Op->getValueType(0),
1464                       Op->getOperand(2), Op->getOperand(1));
1465  case Intrinsic::mips_sll_b:
1466  case Intrinsic::mips_sll_h:
1467  case Intrinsic::mips_sll_w:
1468  case Intrinsic::mips_sll_d:
1469    return DAG.getNode(ISD::SHL, DL, Op->getValueType(0), Op->getOperand(1),
1470                       Op->getOperand(2));
1471  case Intrinsic::mips_slli_b:
1472  case Intrinsic::mips_slli_h:
1473  case Intrinsic::mips_slli_w:
1474  case Intrinsic::mips_slli_d:
1475    return DAG.getNode(ISD::SHL, DL, Op->getValueType(0),
1476                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1477  case Intrinsic::mips_splati_b:
1478  case Intrinsic::mips_splati_h:
1479  case Intrinsic::mips_splati_w:
1480  case Intrinsic::mips_splati_d:
1481    return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0),
1482                       lowerMSASplatImm(Op, 2, DAG), Op->getOperand(1),
1483                       Op->getOperand(1));
1484  case Intrinsic::mips_sra_b:
1485  case Intrinsic::mips_sra_h:
1486  case Intrinsic::mips_sra_w:
1487  case Intrinsic::mips_sra_d:
1488    return DAG.getNode(ISD::SRA, DL, Op->getValueType(0), Op->getOperand(1),
1489                       Op->getOperand(2));
1490  case Intrinsic::mips_srai_b:
1491  case Intrinsic::mips_srai_h:
1492  case Intrinsic::mips_srai_w:
1493  case Intrinsic::mips_srai_d:
1494    return DAG.getNode(ISD::SRA, DL, Op->getValueType(0),
1495                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1496  case Intrinsic::mips_srl_b:
1497  case Intrinsic::mips_srl_h:
1498  case Intrinsic::mips_srl_w:
1499  case Intrinsic::mips_srl_d:
1500    return DAG.getNode(ISD::SRL, DL, Op->getValueType(0), Op->getOperand(1),
1501                       Op->getOperand(2));
1502  case Intrinsic::mips_srli_b:
1503  case Intrinsic::mips_srli_h:
1504  case Intrinsic::mips_srli_w:
1505  case Intrinsic::mips_srli_d:
1506    return DAG.getNode(ISD::SRL, DL, Op->getValueType(0),
1507                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1508  case Intrinsic::mips_subv_b:
1509  case Intrinsic::mips_subv_h:
1510  case Intrinsic::mips_subv_w:
1511  case Intrinsic::mips_subv_d:
1512    return DAG.getNode(ISD::SUB, DL, Op->getValueType(0), Op->getOperand(1),
1513                       Op->getOperand(2));
1514  case Intrinsic::mips_subvi_b:
1515  case Intrinsic::mips_subvi_h:
1516  case Intrinsic::mips_subvi_w:
1517  case Intrinsic::mips_subvi_d:
1518    return DAG.getNode(ISD::SUB, DL, Op->getValueType(0),
1519                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1520  case Intrinsic::mips_vshf_b:
1521  case Intrinsic::mips_vshf_h:
1522  case Intrinsic::mips_vshf_w:
1523  case Intrinsic::mips_vshf_d:
1524    return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0),
1525                       Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
1526  case Intrinsic::mips_xor_v:
1527    return DAG.getNode(ISD::XOR, DL, Op->getValueType(0), Op->getOperand(1),
1528                       Op->getOperand(2));
1529  case Intrinsic::mips_xori_b:
1530    return DAG.getNode(ISD::XOR, DL, Op->getValueType(0),
1531                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1532  }
1533}
1534
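// Lower an MSA load intrinsic (ld.[bhwd] or ldx.[bhwd]) into a generic
// 16-byte aligned load from Address + Offset, threading the incoming chain
// through to the new node.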
1535static SDValue lowerMSALoadIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
1536  SDLoc DL(Op);
1537  SDValue ChainIn = Op->getOperand(0);
1538  SDValue Address = Op->getOperand(2);
1539  SDValue Offset  = Op->getOperand(3);
1540  EVT ResTy = Op->getValueType(0);
1541  EVT PtrTy = Address->getValueType(0);
1542
1543  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
1544
1545  return DAG.getLoad(ResTy, DL, ChainIn, Address, MachinePointerInfo(), false,
1546                     false, false, 16);
1547}
1548
1549SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
1550                                                     SelectionDAG &DAG) const {
1551  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
1552  switch (Intr) {
1553  default:
1554    return SDValue();
1555  case Intrinsic::mips_extp:
1556    return lowerDSPIntr(Op, DAG, MipsISD::EXTP);
1557  case Intrinsic::mips_extpdp:
1558    return lowerDSPIntr(Op, DAG, MipsISD::EXTPDP);
1559  case Intrinsic::mips_extr_w:
1560    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_W);
1561  case Intrinsic::mips_extr_r_w:
1562    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_R_W);
1563  case Intrinsic::mips_extr_rs_w:
1564    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_RS_W);
1565  case Intrinsic::mips_extr_s_h:
1566    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_S_H);
1567  case Intrinsic::mips_mthlip:
1568    return lowerDSPIntr(Op, DAG, MipsISD::MTHLIP);
1569  case Intrinsic::mips_mulsaq_s_w_ph:
1570    return lowerDSPIntr(Op, DAG, MipsISD::MULSAQ_S_W_PH);
1571  case Intrinsic::mips_maq_s_w_phl:
1572    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHL);
1573  case Intrinsic::mips_maq_s_w_phr:
1574    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHR);
1575  case Intrinsic::mips_maq_sa_w_phl:
1576    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHL);
1577  case Intrinsic::mips_maq_sa_w_phr:
1578    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHR);
1579  case Intrinsic::mips_dpaq_s_w_ph:
1580    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_S_W_PH);
1581  case Intrinsic::mips_dpsq_s_w_ph:
1582    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_S_W_PH);
1583  case Intrinsic::mips_dpaq_sa_l_w:
1584    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_SA_L_W);
1585  case Intrinsic::mips_dpsq_sa_l_w:
1586    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_SA_L_W);
1587  case Intrinsic::mips_dpaqx_s_w_ph:
1588    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_S_W_PH);
1589  case Intrinsic::mips_dpaqx_sa_w_ph:
1590    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_SA_W_PH);
1591  case Intrinsic::mips_dpsqx_s_w_ph:
1592    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH);
1593  case Intrinsic::mips_dpsqx_sa_w_ph:
1594    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH);
1595  case Intrinsic::mips_ld_b:
1596  case Intrinsic::mips_ld_h:
1597  case Intrinsic::mips_ld_w:
1598  case Intrinsic::mips_ld_d:
1599  case Intrinsic::mips_ldx_b:
1600  case Intrinsic::mips_ldx_h:
1601  case Intrinsic::mips_ldx_w:
1602  case Intrinsic::mips_ldx_d:
1603    return lowerMSALoadIntr(Op, DAG, Intr);
1604  }
1605}
1606
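// Lower an MSA store intrinsic (st.[bhwd] or stx.[bhwd]) into a generic
// 16-byte aligned store of Value to Address + Offset, threading the incoming
// chain through to the new node.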
1607static SDValue lowerMSAStoreIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
1608  SDLoc DL(Op);
1609  SDValue ChainIn = Op->getOperand(0);
1610  SDValue Value   = Op->getOperand(2);
1611  SDValue Address = Op->getOperand(3);
1612  SDValue Offset  = Op->getOperand(4);
1613  EVT PtrTy = Address->getValueType(0);
1614
1615  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
1616
1617  return DAG.getStore(ChainIn, DL, Value, Address, MachinePointerInfo(), false,
1618                      false, 16);
1619}
1620
1621SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op,
1622                                                  SelectionDAG &DAG) const {
1623  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
1624  switch (Intr) {
1625  default:
1626    return SDValue();
1627  case Intrinsic::mips_st_b:
1628  case Intrinsic::mips_st_h:
1629  case Intrinsic::mips_st_w:
1630  case Intrinsic::mips_st_d:
1631  case Intrinsic::mips_stx_b:
1632  case Intrinsic::mips_stx_h:
1633  case Intrinsic::mips_stx_w:
1634  case Intrinsic::mips_stx_d:
1635    return lowerMSAStoreIntr(Op, DAG, Intr);
1636  }
1637}
1638
1639/// \brief Check if the given BuildVectorSDNode is a splat.
1640/// This method currently relies on DAG nodes being reused when equivalent,
1641/// so it's possible for this to return false even when isConstantSplat returns
1642/// true.
1643static bool isSplatVector(const BuildVectorSDNode *N) {
1644  unsigned int nOps = N->getNumOperands();
1645  assert(nOps > 1 && "Unexpected build vector with fewer than 2 operands");
1646
1647  SDValue Operand0 = N->getOperand(0);
1648
1649  for (unsigned int i = 1; i < nOps; ++i) {
1650    if (N->getOperand(i) != Operand0)
1651      return false;
1652  }
1653
1654  return true;
1655}
1656
1657// Lower ISD::EXTRACT_VECTOR_ELT into MipsISD::VEXTRACT_SEXT_ELT.
1658//
1659// The non-value bits resulting from ISD::EXTRACT_VECTOR_ELT are undefined. We
1660// choose to sign-extend but we could have equally chosen zero-extend. The
1661// DAGCombiner will fold any sign/zero extension of the ISD::EXTRACT_VECTOR_ELT
1662// result into this node later (possibly changing it to a zero-extend in the
1663// process).
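// For example (illustrative), (i32 (extract_vector_elt v8i16:$ws, 1)) is
// lowered to (i32 (VEXTRACT_SEXT_ELT $ws, 1, i16)); a later sign or zero
// extension of that i32 can then be folded into the node as described above.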
1664SDValue MipsSETargetLowering::
1665lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
1666  SDLoc DL(Op);
1667  EVT ResTy = Op->getValueType(0);
1668  SDValue Op0 = Op->getOperand(0);
1669  EVT VecTy = Op0->getValueType(0);
1670
1671  if (!VecTy.is128BitVector())
1672    return SDValue();
1673
1674  if (ResTy.isInteger()) {
1675    SDValue Op1 = Op->getOperand(1);
1676    EVT EltTy = VecTy.getVectorElementType();
1677    return DAG.getNode(MipsISD::VEXTRACT_SEXT_ELT, DL, ResTy, Op0, Op1,
1678                       DAG.getValueType(EltTy));
1679  }
1680
1681  return Op;
1682}
1683
1684static bool isConstantOrUndef(const SDValue Op) {
1685  if (Op->getOpcode() == ISD::UNDEF)
1686    return true;
1687  if (dyn_cast<ConstantSDNode>(Op))
1688    return true;
1689  if (dyn_cast<ConstantFPSDNode>(Op))
1690    return true;
1691  return false;
1692}
1693
1694static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op) {
1695  for (unsigned i = 0; i < Op->getNumOperands(); ++i)
1696    if (isConstantOrUndef(Op->getOperand(i)))
1697      return true;
1698  return false;
1699}
1700
1701// Lowers ISD::BUILD_VECTOR into appropriate SelectionDAG nodes for the
1702// backend.
1703//
1704// Lowers according to the following rules:
1705// - Constant splats are legal as-is as long as the SplatBitSize is a power of
1706//   2 less than or equal to 64 and the value fits into a signed 10-bit
1707//   immediate
1708// - Constant splats are lowered to bitconverted BUILD_VECTORs if SplatBitSize
1709//   is a power of 2 less than or equal to 64 and the value does not fit into a
1710//   signed 10-bit immediate
1711// - Non-constant splats are legal as-is.
1712// - Non-constant non-splats are lowered to sequences of INSERT_VECTOR_ELT.
1713// - All others are illegal and must be expanded.
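// For example (illustrative): a v8i16 splat of 1 fits into a simm10 and is
// left as-is (typically selected as ldi.h), whereas a v8i16 splat of 512 is
// rebuilt as a BUILD_VECTOR of i32 constants so that instruction selection
// can materialise the splat another way, e.g. from a general-purpose
// register.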
1714SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op,
1715                                                SelectionDAG &DAG) const {
1716  BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op);
1717  EVT ResTy = Op->getValueType(0);
1718  SDLoc DL(Op);
1719  APInt SplatValue, SplatUndef;
1720  unsigned SplatBitSize;
1721  bool HasAnyUndefs;
1722
1723  if (!Subtarget->hasMSA() || !ResTy.is128BitVector())
1724    return SDValue();
1725
1726  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
1727                            HasAnyUndefs, 8,
1728                            !Subtarget->isLittle()) && SplatBitSize <= 64) {
1729    // We can only cope with 8, 16, 32, or 64-bit elements
1730    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
1731        SplatBitSize != 64)
1732      return SDValue();
1733
1734    // If the value fits into a simm10 then we can use ldi.[bhwd]
1735    if (SplatValue.isSignedIntN(10))
1736      return Op;
1737
1738    EVT ViaVecTy;
1739
1740    switch (SplatBitSize) {
1741    default:
1742      return SDValue();
1743    case 8:
1744      ViaVecTy = MVT::v16i8;
1745      break;
1746    case 16:
1747      ViaVecTy = MVT::v8i16;
1748      break;
1749    case 32:
1750      ViaVecTy = MVT::v4i32;
1751      break;
1752    case 64:
1753      // There's no fill.d to fall back on for 64-bit values
1754      return SDValue();
1755    }
1756
1757    SmallVector<SDValue, 16> Ops;
1758    SDValue Constant = DAG.getConstant(SplatValue.sextOrSelf(32), MVT::i32);
1759
1760    for (unsigned i = 0; i < ViaVecTy.getVectorNumElements(); ++i)
1761      Ops.push_back(Constant);
1762
1763    SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Node), ViaVecTy,
1764                                 &Ops[0], Ops.size());
1765
1766    if (ViaVecTy != ResTy)
1767      Result = DAG.getNode(ISD::BITCAST, SDLoc(Node), ResTy, Result);
1768
1769    return Result;
1770  } else if (isSplatVector(Node))
1771    return Op;
1772  else if (!isConstantOrUndefBUILD_VECTOR(Node)) {
1773    // Use INSERT_VECTOR_ELT operations rather than expand to stores.
1774    // The resulting code is the same length as the expansion, but it doesn't
1775    // use memory operations
1776    EVT ResTy = Node->getValueType(0);
1777
1778    assert(ResTy.isVector());
1779
1780    unsigned NumElts = ResTy.getVectorNumElements();
1781    SDValue Vector = DAG.getUNDEF(ResTy);
1782    for (unsigned i = 0; i < NumElts; ++i) {
1783      Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
1784                           Node->getOperand(i),
1785                           DAG.getConstant(i, MVT::i32));
1786    }
1787    return Vector;
1788  }
1789
1790  return SDValue();
1791}
1792
1793// Lower VECTOR_SHUFFLE into SHF (if possible).
1794//
1795// SHF splits the vector into blocks of four elements, then shuffles these
1796// elements according to a <4 x i2> constant (encoded as an integer immediate).
1797//
1798// It is therefore possible to lower into SHF when the mask takes the form:
1799//   <a, b, c, d, a+4, b+4, c+4, d+4, a+8, b+8, c+8, d+8, ...>
1800// When undefs appear they are treated as if they were whatever value is
1801// necessary in order to fit the above form.
1802//
1803// For example:
1804//   %2 = shufflevector <8 x i16> %0, <8 x i16> undef,
1805//                      <8 x i32> <i32 3, i32 2, i32 1, i32 0,
1806//                                 i32 7, i32 6, i32 5, i32 4>
1807// is lowered to:
1808//   (SHF_H $w0, $w1, 27)
1809// where the 27 comes from:
1810//   3 + (2 << 2) + (1 << 4) + (0 << 6)
1811static SDValue lowerVECTOR_SHUFFLE_SHF(SDValue Op, EVT ResTy,
1812                                       SmallVector<int, 16> Indices,
1813                                       SelectionDAG &DAG) {
1814  int SHFIndices[4] = { -1, -1, -1, -1 };
1815
1816  if (Indices.size() < 4)
1817    return SDValue();
1818
1819  for (unsigned i = 0; i < 4; ++i) {
1820    for (unsigned j = i; j < Indices.size(); j += 4) {
1821      int Idx = Indices[j];
1822
1823      // Convert from vector index to 4-element subvector index
1824      // If an index refers to an element outside of the subvector then give up
1825      if (Idx != -1) {
1826        Idx -= 4 * (j / 4);
1827        if (Idx < 0 || Idx >= 4)
1828          return SDValue();
1829      }
1830
1831      // If the mask has an undef, replace it with the current index.
1832      // Note that it might still be undef if the current index is also undef
1833      if (SHFIndices[i] == -1)
1834        SHFIndices[i] = Idx;
1835
1836      // Check that non-undef values are the same as in the mask. If they
1837      // aren't then give up
1838      if (!(Idx == -1 || Idx == SHFIndices[i]))
1839        return SDValue();
1840    }
1841  }
1842
1843  // Calculate the immediate. Replace any remaining undefs with zero
1844  APInt Imm(32, 0);
1845  for (int i = 3; i >= 0; --i) {
1846    int Idx = SHFIndices[i];
1847
1848    if (Idx == -1)
1849      Idx = 0;
1850
1851    Imm <<= 2;
1852    Imm |= Idx & 0x3;
1853  }
1854
1855  return DAG.getNode(MipsISD::SHF, SDLoc(Op), ResTy,
1856                     DAG.getConstant(Imm, MVT::i32), Op->getOperand(0));
1857}
1858
1859// Lower VECTOR_SHUFFLE into ILVEV (if possible).
1860//
1861// ILVEV interleaves the even elements from each vector.
1862//
1863// It is possible to lower into ILVEV when the mask takes the form:
1864//   <0, n, 2, n+2, 4, n+4, ...>
1865// where n is the number of elements in the vector.
1866//
1867// When undefs appear in the mask they are treated as if they were whatever
1868// value is necessary in order to fit the above form.
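//
// For example, with v8i16 operands $ws and $wt (n = 8), the mask
//   <0, 8, 2, 10, 4, 12, 6, 14>
// is lowered to:
//   (ILVEV $ws, $wt)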
1869static SDValue lowerVECTOR_SHUFFLE_ILVEV(SDValue Op, EVT ResTy,
1870                                         SmallVector<int, 16> Indices,
1871                                         SelectionDAG &DAG) {
1872  assert ((Indices.size() % 2) == 0);
1873  int WsIdx = 0;
1874  int WtIdx = ResTy.getVectorNumElements();
1875
1876  for (unsigned i = 0; i < Indices.size(); i += 2) {
1877    if (Indices[i] != -1 && Indices[i] != WsIdx)
1878      return SDValue();
1879    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1880      return SDValue();
1881    WsIdx += 2;
1882    WtIdx += 2;
1883  }
1884
1885  return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), ResTy, Op->getOperand(0),
1886                     Op->getOperand(1));
1887}
1888
1889// Lower VECTOR_SHUFFLE into ILVOD (if possible).
1890//
1891// ILVOD interleaves the odd elements from each vector.
1892//
1893// It is possible to lower into ILVOD when the mask takes the form:
1894//   <1, n+1, 3, n+3, 5, n+5, ...>
1895// where n is the number of elements in the vector.
1896//
1897// When undefs appear in the mask they are treated as if they were whatever
1898// value is necessary in order to fit the above form.
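//
// For example, with v8i16 operands $ws and $wt (n = 8), the mask
//   <1, 9, 3, 11, 5, 13, 7, 15>
// is lowered to:
//   (ILVOD $ws, $wt)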
1899static SDValue lowerVECTOR_SHUFFLE_ILVOD(SDValue Op, EVT ResTy,
1900                                         SmallVector<int, 16> Indices,
1901                                         SelectionDAG &DAG) {
1902  assert ((Indices.size() % 2) == 0);
1903  int WsIdx = 1;
1904  int WtIdx = ResTy.getVectorNumElements() + 1;
1905
1906  for (unsigned i = 0; i < Indices.size(); i += 2) {
1907    if (Indices[i] != -1 && Indices[i] != WsIdx)
1908      return SDValue();
1909    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1910      return SDValue();
1911    WsIdx += 2;
1912    WtIdx += 2;
1913  }
1914
1915  return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), ResTy, Op->getOperand(0),
1916                     Op->getOperand(1));
1917}
1918
1919// Lower VECTOR_SHUFFLE into ILVL (if possible).
1920//
1921// ILVL interleaves consecutive elements from the left half of each vector.
1922//
1923// It is possible to lower into ILVL when the mask takes the form:
1924//   <0, n, 1, n+1, 2, n+2, ...>
1925// where n is the number of elements in the vector.
1926//
1927// When undefs appear in the mask they are treated as if they were whatever
1928// value is necessary in order to fit the above form.
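//
// For example, with v8i16 operands $ws and $wt (n = 8), the mask
//   <0, 8, 1, 9, 2, 10, 3, 11>
// is lowered to:
//   (ILVL $ws, $wt)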
1929static SDValue lowerVECTOR_SHUFFLE_ILVL(SDValue Op, EVT ResTy,
1930                                        SmallVector<int, 16> Indices,
1931                                        SelectionDAG &DAG) {
1932  assert ((Indices.size() % 2) == 0);
1933  int WsIdx = 0;
1934  int WtIdx = ResTy.getVectorNumElements();
1935
1936  for (unsigned i = 0; i < Indices.size(); i += 2) {
1937    if (Indices[i] != -1 && Indices[i] != WsIdx)
1938      return SDValue();
1939    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1940      return SDValue();
1941    WsIdx ++;
1942    WtIdx ++;
1943  }
1944
1945  return DAG.getNode(MipsISD::ILVL, SDLoc(Op), ResTy, Op->getOperand(0),
1946                     Op->getOperand(1));
1947}
1948
1949// Lower VECTOR_SHUFFLE into ILVR (if possible).
1950//
1951// ILVR interleaves consecutive elements from the right half of each vector.
1952//
1953// It is possible to lower into ILVR when the mask takes the form:
1954//   <x, n+x, x+1, n+x+1, x+2, n+x+2, ...>
1955// where n is the number of elements in the vector and x is half n.
1956//
1957// When undefs appear in the mask they are treated as if they were whatever
1958// value is necessary in order to fit the above form.
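//
// For example, with v8i16 operands $ws and $wt (n = 8, x = 4), the mask
//   <4, 12, 5, 13, 6, 14, 7, 15>
// is lowered to:
//   (ILVR $ws, $wt)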
1959static SDValue lowerVECTOR_SHUFFLE_ILVR(SDValue Op, EVT ResTy,
1960                                        SmallVector<int, 16> Indices,
1961                                        SelectionDAG &DAG) {
1962  assert ((Indices.size() % 2) == 0);
1963  unsigned NumElts = ResTy.getVectorNumElements();
1964  int WsIdx = NumElts / 2;
1965  int WtIdx = NumElts + NumElts / 2;
1966
1967  for (unsigned i = 0; i < Indices.size(); i += 2) {
1968    if (Indices[i] != -1 && Indices[i] != WsIdx)
1969      return SDValue();
1970    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1971      return SDValue();
1972    WsIdx ++;
1973    WtIdx ++;
1974  }
1975
1976  return DAG.getNode(MipsISD::ILVR, SDLoc(Op), ResTy, Op->getOperand(0),
1977                     Op->getOperand(1));
1978}
1979
1980// Lower VECTOR_SHUFFLE into PCKEV (if possible).
1981//
1982// PCKEV copies the even elements of each vector into the result vector.
1983//
1984// It is possible to lower into PCKEV when the mask takes the form:
1985//   <0, 2, 4, ..., n, n+2, n+4, ...>
1986// where n is the number of elements in the vector.
1987//
1988// When undefs appear in the mask they are treated as if they were whatever
1989// value is necessary in order to fit the above form.
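//
// For example, with v8i16 operands $ws and $wt (n = 8), the mask
//   <0, 2, 4, 6, 8, 10, 12, 14>
// is lowered to:
//   (PCKEV $ws, $wt)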
1990static SDValue lowerVECTOR_SHUFFLE_PCKEV(SDValue Op, EVT ResTy,
1991                                         SmallVector<int, 16> Indices,
1992                                         SelectionDAG &DAG) {
1993  assert ((Indices.size() % 2) == 0);
1994  int Idx = 0;
1995
1996  for (unsigned i = 0; i < Indices.size(); ++i) {
1997    if (Indices[i] != -1 && Indices[i] != Idx)
1998      return SDValue();
1999    Idx += 2;
2000  }
2001
2002  return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), ResTy, Op->getOperand(0),
2003                     Op->getOperand(1));
2004}
2005
2006// Lower VECTOR_SHUFFLE into PCKOD (if possible).
2007//
2008// PCKOD copies the odd elements of each vector into the result vector.
2009//
2010// It is possible to lower into PCKOD when the mask takes the form:
2011//   <1, 3, 5, ..., n+1, n+3, n+5, ...>
2012// where n is the number of elements in the vector.
2013//
2014// When undefs appear in the mask they are treated as if they were whatever
2015// value is necessary in order to fit the above form.
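//
// For example, with v8i16 operands $ws and $wt (n = 8), the mask
//   <1, 3, 5, 7, 9, 11, 13, 15>
// is lowered to:
//   (PCKOD $ws, $wt)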
2016static SDValue lowerVECTOR_SHUFFLE_PCKOD(SDValue Op, EVT ResTy,
2017                                         SmallVector<int, 16> Indices,
2018                                         SelectionDAG &DAG) {
2019  assert ((Indices.size() % 2) == 0);
2020  int Idx = 1;
2021
2022  for (unsigned i = 0; i < Indices.size(); ++i) {
2023    if (Indices[i] != -1 && Indices[i] != Idx)
2024      return SDValue();
2025    Idx += 2;
2026  }
2027
2028  return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), ResTy, Op->getOperand(0),
2029                     Op->getOperand(1));
2030}
2031
2032// Lower VECTOR_SHUFFLE into VSHF.
2033//
2034// This mostly consists of converting the shuffle indices in Indices into a
2035// BUILD_VECTOR and adding it as an operand to the resulting VSHF. There is
2036// also code to eliminate unused operands of the VECTOR_SHUFFLE. For example,
2037// if the type is v8i16 and all the indices are less than 8 then the second
2038// operand is unused and can be replaced with anything. We choose to replace it
2039// with the used operand since this reduces the number of instructions overall.
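//
// For example (illustrative), the v8i16 mask <1, 0, 3, 2, 5, 4, 7, 6> only
// references the first operand $ws, so it is lowered to:
//   (VSHF (BUILD_VECTOR 1, 0, 3, 2, 5, 4, 7, 6), $ws, $ws)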
2040static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
2041                                        SmallVector<int, 16> Indices,
2042                                        SelectionDAG &DAG) {
2043  SmallVector<SDValue, 16> Ops;
2044  SDValue Op0;
2045  SDValue Op1;
2046  EVT MaskVecTy = ResTy.changeVectorElementTypeToInteger();
2047  EVT MaskEltTy = MaskVecTy.getVectorElementType();
2048  bool Using1stVec = false;
2049  bool Using2ndVec = false;
2050  SDLoc DL(Op);
2051  int ResTyNumElts = ResTy.getVectorNumElements();
2052
2053  for (int i = 0; i < ResTyNumElts; ++i) {
2054    // Idx == -1 means UNDEF
2055    int Idx = Indices[i];
2056
2057    if (0 <= Idx && Idx < ResTyNumElts)
2058      Using1stVec = true;
2059    if (ResTyNumElts <= Idx && Idx < ResTyNumElts * 2)
2060      Using2ndVec = true;
2061  }
2062
2063  for (SmallVector<int, 16>::iterator I = Indices.begin(); I != Indices.end();
2064       ++I)
2065    Ops.push_back(DAG.getTargetConstant(*I, MaskEltTy));
2066
2067  SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, &Ops[0],
2068                                Ops.size());
2069
2070  if (Using1stVec && Using2ndVec) {
2071    Op0 = Op->getOperand(0);
2072    Op1 = Op->getOperand(1);
2073  } else if (Using1stVec)
2074    Op0 = Op1 = Op->getOperand(0);
2075  else if (Using2ndVec)
2076    Op0 = Op1 = Op->getOperand(1);
2077  else
2078    llvm_unreachable("shuffle vector mask references neither vector operand?");
2079
2080  return DAG.getNode(MipsISD::VSHF, DL, ResTy, MaskVec, Op0, Op1);
2081}
2082
2083// Lower VECTOR_SHUFFLE into one of a number of instructions depending on the
2084// indices in the shuffle.
2085SDValue MipsSETargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
2086                                                  SelectionDAG &DAG) const {
2087  ShuffleVectorSDNode *Node = cast<ShuffleVectorSDNode>(Op);
2088  EVT ResTy = Op->getValueType(0);
2089
2090  if (!ResTy.is128BitVector())
2091    return SDValue();
2092
2093  int ResTyNumElts = ResTy.getVectorNumElements();
2094  SmallVector<int, 16> Indices;
2095
2096  for (int i = 0; i < ResTyNumElts; ++i)
2097    Indices.push_back(Node->getMaskElt(i));
2098
2099  SDValue Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG);
2100  if (Result.getNode())
2101    return Result;
2102  Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG);
2103  if (Result.getNode())
2104    return Result;
2105  Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG);
2106  if (Result.getNode())
2107    return Result;
2108  Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG);
2109  if (Result.getNode())
2110    return Result;
2111  Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG);
2112  if (Result.getNode())
2113    return Result;
2114  Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG);
2115  if (Result.getNode())
2116    return Result;
2117  Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG);
2118  if (Result.getNode())
2119    return Result;
2120  return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
2121}
2122
2123MachineBasicBlock * MipsSETargetLowering::
2124emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
2125  // $bb:
2126  //  bposge32_pseudo $vr0
2127  //  =>
2128  // $bb:
2129  //  bposge32 $tbb
2130  // $fbb:
2131  //  li $vr2, 0
2132  //  b $sink
2133  // $tbb:
2134  //  li $vr1, 1
2135  // $sink:
2136  //  $vr0 = phi($vr2, $fbb, $vr1, $tbb)
2137
2138  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2139  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2140  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
2141  DebugLoc DL = MI->getDebugLoc();
2142  const BasicBlock *LLVM_BB = BB->getBasicBlock();
2143  MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
2144  MachineFunction *F = BB->getParent();
2145  MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
2146  MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
2147  MachineBasicBlock *Sink  = F->CreateMachineBasicBlock(LLVM_BB);
2148  F->insert(It, FBB);
2149  F->insert(It, TBB);
2150  F->insert(It, Sink);
2151
2152  // Transfer the remainder of BB and its successor edges to Sink.
2153  Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
2154               BB->end());
2155  Sink->transferSuccessorsAndUpdatePHIs(BB);
2156
2157  // Add successors.
2158  BB->addSuccessor(FBB);
2159  BB->addSuccessor(TBB);
2160  FBB->addSuccessor(Sink);
2161  TBB->addSuccessor(Sink);
2162
2163  // Insert the real bposge32 instruction to $BB.
2164  BuildMI(BB, DL, TII->get(Mips::BPOSGE32)).addMBB(TBB);
2165
2166  // Fill $FBB.
2167  unsigned VR2 = RegInfo.createVirtualRegister(RC);
2168  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), VR2)
2169    .addReg(Mips::ZERO).addImm(0);
2170  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
2171
2172  // Fill $TBB.
2173  unsigned VR1 = RegInfo.createVirtualRegister(RC);
2174  BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), VR1)
2175    .addReg(Mips::ZERO).addImm(1);
2176
2177  // Insert phi function to $Sink.
2178  BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
2179          MI->getOperand(0).getReg())
2180    .addReg(VR2).addMBB(FBB).addReg(VR1).addMBB(TBB);
2181
2182  MI->eraseFromParent();   // The pseudo instruction is gone now.
2183  return Sink;
2184}
2185
2186MachineBasicBlock * MipsSETargetLowering::
2187emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
2188                     unsigned BranchOp) const{
2189  // $bb:
2190  //  vany_nonzero $rd, $ws
2191  //  =>
2192  // $bb:
2193  //  bnz.b $ws, $tbb
2194  //  b $fbb
2195  // $fbb:
2196  //  li $rd1, 0
2197  //  b $sink
2198  // $tbb:
2199  //  li $rd2, 1
2200  // $sink:
2201  //  $rd = phi($rd1, $fbb, $rd2, $tbb)
2202
2203  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2204  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2205  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
2206  DebugLoc DL = MI->getDebugLoc();
2207  const BasicBlock *LLVM_BB = BB->getBasicBlock();
2208  MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
2209  MachineFunction *F = BB->getParent();
2210  MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
2211  MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
2212  MachineBasicBlock *Sink  = F->CreateMachineBasicBlock(LLVM_BB);
2213  F->insert(It, FBB);
2214  F->insert(It, TBB);
2215  F->insert(It, Sink);
2216
2217  // Transfer the remainder of BB and its successor edges to Sink.
2218  Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
2219               BB->end());
2220  Sink->transferSuccessorsAndUpdatePHIs(BB);
2221
2222  // Add successors.
2223  BB->addSuccessor(FBB);
2224  BB->addSuccessor(TBB);
2225  FBB->addSuccessor(Sink);
2226  TBB->addSuccessor(Sink);
2227
2228  // Insert the real bnz.b instruction to $BB.
2229  BuildMI(BB, DL, TII->get(BranchOp))
2230    .addReg(MI->getOperand(1).getReg())
2231    .addMBB(TBB);
2232
2233  // Fill $FBB.
2234  unsigned RD1 = RegInfo.createVirtualRegister(RC);
2235  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), RD1)
2236    .addReg(Mips::ZERO).addImm(0);
2237  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
2238
2239  // Fill $TBB.
2240  unsigned RD2 = RegInfo.createVirtualRegister(RC);
2241  BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), RD2)
2242    .addReg(Mips::ZERO).addImm(1);
2243
2244  // Insert phi function to $Sink.
2245  BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
2246          MI->getOperand(0).getReg())
2247    .addReg(RD1).addMBB(FBB).addReg(RD2).addMBB(TBB);
2248
2249  MI->eraseFromParent();   // The pseudo instruction is gone now.
2250  return Sink;
2251}
2252
2253// Emit the COPY_FW pseudo instruction.
2254//
2255// copy_fw_pseudo $fd, $ws, n
2256// =>
2257// splati.w $wt, $ws, $n
2258// copy     $fd, $wt:sub_lo
2259//
2260// When n is zero, the equivalent operation can be performed with (potentially)
2261// zero instructions due to register overlaps. This optimization is never valid
2262// for lane 1 because it would require FR=0 mode which isn't supported by MSA.
2263MachineBasicBlock * MipsSETargetLowering::
2264emitCOPY_FW(MachineInstr *MI, MachineBasicBlock *BB) const{
2265  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2266  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2267  DebugLoc DL = MI->getDebugLoc();
2268  unsigned Fd = MI->getOperand(0).getReg();
2269  unsigned Ws = MI->getOperand(1).getReg();
2270  unsigned Lane = MI->getOperand(2).getImm();
2271
2272  if (Lane == 0)
2273    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_lo);
2274  else {
2275    unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
2276
2277    BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(Lane);
2278    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
2279  }
2280
2281  MI->eraseFromParent();   // The pseudo instruction is gone now.
2282  return BB;
2283}
2284
2285// Emit the COPY_FD pseudo instruction.
2286//
2287// copy_fd_pseudo $fd, $ws, n
2288// =>
2289// splati.d $wt, $ws, $n
2290// copy $fd, $wt:sub_64
2291//
2292// When n is zero, the equivalent operation can be performed with (potentially)
2293// zero instructions due to register overlaps. This optimization is always
2294// valid because FR=1 mode is the only mode supported by MSA.
2295MachineBasicBlock * MipsSETargetLowering::
2296emitCOPY_FD(MachineInstr *MI, MachineBasicBlock *BB) const{
2297  assert(Subtarget->isFP64bit());
2298
2299  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2300  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2301  unsigned Fd  = MI->getOperand(0).getReg();
2302  unsigned Ws  = MI->getOperand(1).getReg();
2303  unsigned Lane = MI->getOperand(2).getImm() * 2;
2304  DebugLoc DL = MI->getDebugLoc();
2305
2306  if (Lane == 0)
2307    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_64);
2308  else {
2309    unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
2310
2311    BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wt).addReg(Ws).addImm(1);
2312    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_64);
2313  }
2314
2315  MI->eraseFromParent();   // The pseudo instruction is gone now.
2316  return BB;
2317}
2318