MipsSEISelLowering.cpp revision 3706eda52c4565016959902a3f5aaf7271516286
1//===-- MipsSEISelLowering.cpp - MipsSE DAG Lowering Interface --*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Subclass of MipsTargetLowering specialized for mips32/64.
11//
12//===----------------------------------------------------------------------===//
13#include "MipsSEISelLowering.h"
14#include "MipsRegisterInfo.h"
15#include "MipsTargetMachine.h"
16#include "llvm/CodeGen/MachineInstrBuilder.h"
17#include "llvm/CodeGen/MachineRegisterInfo.h"
18#include "llvm/IR/Intrinsics.h"
19#include "llvm/Support/CommandLine.h"
20#include "llvm/Target/TargetInstrInfo.h"
21
22using namespace llvm;
23
24static cl::opt<bool>
25EnableMipsTailCalls("enable-mips-tail-calls", cl::Hidden,
26                    cl::desc("MIPS: Enable tail calls."), cl::init(false));
27
28static cl::opt<bool> NoDPLoadStore("mno-ldc1-sdc1", cl::init(false),
29                                   cl::desc("Expand double precision loads and "
30                                            "stores to their single precision "
31                                            "counterparts"));
32
33MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
34  : MipsTargetLowering(TM) {
35  // Set up the register classes
36
37  clearRegisterClasses();
38
39  addRegisterClass(MVT::i32, &Mips::GPR32RegClass);
40
41  if (HasMips64)
42    addRegisterClass(MVT::i64, &Mips::GPR64RegClass);
43
44  if (Subtarget->hasDSP()) {
45    MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};
46
47    for (unsigned i = 0; i < array_lengthof(VecTys); ++i) {
48      addRegisterClass(VecTys[i], &Mips::DSPRRegClass);
49
50      // Expand all builtin opcodes.
51      for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
52        setOperationAction(Opc, VecTys[i], Expand);
53
54      setOperationAction(ISD::ADD, VecTys[i], Legal);
55      setOperationAction(ISD::SUB, VecTys[i], Legal);
56      setOperationAction(ISD::LOAD, VecTys[i], Legal);
57      setOperationAction(ISD::STORE, VecTys[i], Legal);
58      setOperationAction(ISD::BITCAST, VecTys[i], Legal);
59    }
60
61    // Expand all truncating stores and extending loads.
62    unsigned FirstVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
63    unsigned LastVT = (unsigned)MVT::LAST_VECTOR_VALUETYPE;
64
65    for (unsigned VT0 = FirstVT; VT0 <= LastVT; ++VT0) {
66      for (unsigned VT1 = FirstVT; VT1 <= LastVT; ++VT1)
67        setTruncStoreAction((MVT::SimpleValueType)VT0,
68                            (MVT::SimpleValueType)VT1, Expand);
69
70      setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
71      setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
72      setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT0, Expand);
73    }
74
75    setTargetDAGCombine(ISD::SHL);
76    setTargetDAGCombine(ISD::SRA);
77    setTargetDAGCombine(ISD::SRL);
78    setTargetDAGCombine(ISD::SETCC);
79    setTargetDAGCombine(ISD::VSELECT);
80  }
81
82  if (Subtarget->hasDSPR2())
83    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
84
85  if (Subtarget->hasMSA()) {
86    addMSAIntType(MVT::v16i8, &Mips::MSA128BRegClass);
87    addMSAIntType(MVT::v8i16, &Mips::MSA128HRegClass);
88    addMSAIntType(MVT::v4i32, &Mips::MSA128WRegClass);
89    addMSAIntType(MVT::v2i64, &Mips::MSA128DRegClass);
90    addMSAFloatType(MVT::v8f16, &Mips::MSA128HRegClass);
91    addMSAFloatType(MVT::v4f32, &Mips::MSA128WRegClass);
92    addMSAFloatType(MVT::v2f64, &Mips::MSA128DRegClass);
93
94    setTargetDAGCombine(ISD::AND);
95    setTargetDAGCombine(ISD::SRA);
96    setTargetDAGCombine(ISD::VSELECT);
97    setTargetDAGCombine(ISD::XOR);
98  }
99
100  if (!Subtarget->mipsSEUsesSoftFloat()) {
101    addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
102
103    // When dealing with single precision only, use libcalls
104    if (!Subtarget->isSingleFloat()) {
105      if (Subtarget->isFP64bit())
106        addRegisterClass(MVT::f64, &Mips::FGR64RegClass);
107      else
108        addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);
109    }
110  }
111
112  setOperationAction(ISD::SMUL_LOHI,          MVT::i32, Custom);
113  setOperationAction(ISD::UMUL_LOHI,          MVT::i32, Custom);
114  setOperationAction(ISD::MULHS,              MVT::i32, Custom);
115  setOperationAction(ISD::MULHU,              MVT::i32, Custom);
116
117  if (HasMips64) {
118    setOperationAction(ISD::MULHS,            MVT::i64, Custom);
119    setOperationAction(ISD::MULHU,            MVT::i64, Custom);
120    setOperationAction(ISD::MUL,              MVT::i64, Custom);
121  }
122
123  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
124  setOperationAction(ISD::INTRINSIC_W_CHAIN,  MVT::i64, Custom);
125
126  setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
127  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
128  setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
129  setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
130  setOperationAction(ISD::ATOMIC_FENCE,       MVT::Other, Custom);
131  setOperationAction(ISD::LOAD,               MVT::i32, Custom);
132  setOperationAction(ISD::STORE,              MVT::i32, Custom);
133
134  setTargetDAGCombine(ISD::ADDE);
135  setTargetDAGCombine(ISD::SUBE);
136  setTargetDAGCombine(ISD::MUL);
137
138  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
139  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
140  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
141
142  if (NoDPLoadStore) {
143    setOperationAction(ISD::LOAD, MVT::f64, Custom);
144    setOperationAction(ISD::STORE, MVT::f64, Custom);
145  }
146
147  computeRegisterProperties();
148}
149
150const MipsTargetLowering *
151llvm::createMipsSETargetLowering(MipsTargetMachine &TM) {
152  return new MipsSETargetLowering(TM);
153}
154
155// Enable MSA support for the given integer type and register class.
156void MipsSETargetLowering::
157addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
158  addRegisterClass(Ty, RC);
159
160  // Expand all builtin opcodes.
161  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
162    setOperationAction(Opc, Ty, Expand);
163
164  setOperationAction(ISD::BITCAST, Ty, Legal);
165  setOperationAction(ISD::LOAD, Ty, Legal);
166  setOperationAction(ISD::STORE, Ty, Legal);
167  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Custom);
168  setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
169  setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);
170
171  setOperationAction(ISD::ADD, Ty, Legal);
172  setOperationAction(ISD::AND, Ty, Legal);
173  setOperationAction(ISD::CTLZ, Ty, Legal);
174  setOperationAction(ISD::CTPOP, Ty, Legal);
175  setOperationAction(ISD::MUL, Ty, Legal);
176  setOperationAction(ISD::OR, Ty, Legal);
177  setOperationAction(ISD::SDIV, Ty, Legal);
178  setOperationAction(ISD::SHL, Ty, Legal);
179  setOperationAction(ISD::SRA, Ty, Legal);
180  setOperationAction(ISD::SRL, Ty, Legal);
181  setOperationAction(ISD::SUB, Ty, Legal);
182  setOperationAction(ISD::UDIV, Ty, Legal);
183  setOperationAction(ISD::VECTOR_SHUFFLE, Ty, Custom);
184  setOperationAction(ISD::VSELECT, Ty, Legal);
185  setOperationAction(ISD::XOR, Ty, Legal);
186
187  setOperationAction(ISD::SETCC, Ty, Legal);
188  setCondCodeAction(ISD::SETNE, Ty, Expand);
189  setCondCodeAction(ISD::SETGE, Ty, Expand);
190  setCondCodeAction(ISD::SETGT, Ty, Expand);
191  setCondCodeAction(ISD::SETUGE, Ty, Expand);
192  setCondCodeAction(ISD::SETUGT, Ty, Expand);
193}
194
195// Enable MSA support for the given floating-point type and register class.
196void MipsSETargetLowering::
197addMSAFloatType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
198  addRegisterClass(Ty, RC);
199
200  // Expand all builtin opcodes.
201  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
202    setOperationAction(Opc, Ty, Expand);
203
204  setOperationAction(ISD::LOAD, Ty, Legal);
205  setOperationAction(ISD::STORE, Ty, Legal);
206  setOperationAction(ISD::BITCAST, Ty, Legal);
207  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Legal);
208
209  if (Ty != MVT::v8f16) {
210    setOperationAction(ISD::FABS,  Ty, Legal);
211    setOperationAction(ISD::FADD,  Ty, Legal);
212    setOperationAction(ISD::FDIV,  Ty, Legal);
213    setOperationAction(ISD::FLOG2, Ty, Legal);
214    setOperationAction(ISD::FMUL,  Ty, Legal);
215    setOperationAction(ISD::FRINT, Ty, Legal);
216    setOperationAction(ISD::FSQRT, Ty, Legal);
217    setOperationAction(ISD::FSUB,  Ty, Legal);
218    setOperationAction(ISD::VSELECT, Ty, Legal);
219
220    setOperationAction(ISD::SETCC, Ty, Legal);
221    setCondCodeAction(ISD::SETOGE, Ty, Expand);
222    setCondCodeAction(ISD::SETOGT, Ty, Expand);
223    setCondCodeAction(ISD::SETUGE, Ty, Expand);
224    setCondCodeAction(ISD::SETUGT, Ty, Expand);
225    setCondCodeAction(ISD::SETGE,  Ty, Expand);
226    setCondCodeAction(ISD::SETGT,  Ty, Expand);
227  }
228}
229
230bool
231MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
232  MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
233
234  switch (SVT) {
235  case MVT::i64:
236  case MVT::i32:
237    if (Fast)
238      *Fast = true;
239    return true;
240  default:
241    return false;
242  }
243}
244
245SDValue MipsSETargetLowering::LowerOperation(SDValue Op,
246                                             SelectionDAG &DAG) const {
247  switch(Op.getOpcode()) {
248  case ISD::LOAD:  return lowerLOAD(Op, DAG);
249  case ISD::STORE: return lowerSTORE(Op, DAG);
250  case ISD::SMUL_LOHI: return lowerMulDiv(Op, MipsISD::Mult, true, true, DAG);
251  case ISD::UMUL_LOHI: return lowerMulDiv(Op, MipsISD::Multu, true, true, DAG);
252  case ISD::MULHS:     return lowerMulDiv(Op, MipsISD::Mult, false, true, DAG);
253  case ISD::MULHU:     return lowerMulDiv(Op, MipsISD::Multu, false, true, DAG);
254  case ISD::MUL:       return lowerMulDiv(Op, MipsISD::Mult, true, false, DAG);
255  case ISD::SDIVREM:   return lowerMulDiv(Op, MipsISD::DivRem, true, true, DAG);
256  case ISD::UDIVREM:   return lowerMulDiv(Op, MipsISD::DivRemU, true, true,
257                                          DAG);
258  case ISD::INTRINSIC_WO_CHAIN: return lowerINTRINSIC_WO_CHAIN(Op, DAG);
259  case ISD::INTRINSIC_W_CHAIN:  return lowerINTRINSIC_W_CHAIN(Op, DAG);
260  case ISD::INTRINSIC_VOID:     return lowerINTRINSIC_VOID(Op, DAG);
261  case ISD::EXTRACT_VECTOR_ELT: return lowerEXTRACT_VECTOR_ELT(Op, DAG);
262  case ISD::BUILD_VECTOR:       return lowerBUILD_VECTOR(Op, DAG);
263  case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, DAG);
264  }
265
266  return MipsTargetLowering::LowerOperation(Op, DAG);
267}
268
269// selectMADD -
270// Transforms a subgraph in CurDAG if the following pattern is found:
271//  (addc multLo, Lo0), (adde multHi, Hi0),
272// where,
273//  multHi/Lo: product of multiplication
274//  Lo0: initial value of Lo register
275//  Hi0: initial value of Hi register
276// Return true if pattern matching was successful.
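//
// An illustrative sketch (not taken from a test case; $a, $b, $lo0 and $hi0
// are placeholder values):
//
//   (addc (smul_lohi $a, $b):0, $lo0), (adde (smul_lohi $a, $b):1, $hi0)
//   =>
//   acc  = (InsertLOHI $lo0, $hi0)
//   madd = (MAdd $a, $b, acc)            (MAddu for umul_lohi)
//   lo   = (ExtractLOHI madd, sub_lo)    (replaces the addc result)
//   hi   = (ExtractLOHI madd, sub_hi)    (replaces the adde result)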
277static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) {
278  // ADDENode's second operand must be a flag output of an ADDC node in order
279  // for the matching to be successful.
280  SDNode *ADDCNode = ADDENode->getOperand(2).getNode();
281
282  if (ADDCNode->getOpcode() != ISD::ADDC)
283    return false;
284
285  SDValue MultHi = ADDENode->getOperand(0);
286  SDValue MultLo = ADDCNode->getOperand(0);
287  SDNode *MultNode = MultHi.getNode();
288  unsigned MultOpc = MultHi.getOpcode();
289
290  // MultHi and MultLo must be generated by the same node,
291  if (MultLo.getNode() != MultNode)
292    return false;
293
294  // and it must be a multiplication.
295  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
296    return false;
297
298  // MultLo and MultHi must be the first and second output of MultNode
299  // respectively.
300  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
301    return false;
302
303  // Transform this to a MADD only if ADDENode and ADDCNode are the only users
304  // of the values of MultNode, in which case MultNode will be removed in later
305  // phases.
306  // If there are users other than ADDENode or ADDCNode, this function returns
307  // here, which results in MultNode being mapped to a single MULT
308  // instruction node rather than a pair of MULT and MADD instructions being
309  // produced.
310  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
311    return false;
312
313  SDLoc DL(ADDENode);
314
315  // Initialize accumulator.
316  SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
317                                  ADDCNode->getOperand(1),
318                                  ADDENode->getOperand(1));
319
320  // create MipsMAdd(u) node
321  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MAddu : MipsISD::MAdd;
322
323  SDValue MAdd = CurDAG->getNode(MultOpc, DL, MVT::Untyped,
324                                 MultNode->getOperand(0),// Factor 0
325                                 MultNode->getOperand(1),// Factor 1
326                                 ACCIn);
327
328  // replace uses of adde and addc here
329  if (!SDValue(ADDCNode, 0).use_empty()) {
330    SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32);
331    SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd,
332                                    LoIdx);
333    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), LoOut);
334  }
335  if (!SDValue(ADDENode, 0).use_empty()) {
336    SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32);
337    SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MAdd,
338                                    HiIdx);
339    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), HiOut);
340  }
341
342  return true;
343}
344
345// selectMSUB -
346// Transforms a subgraph in CurDAG if the following pattern is found:
347//  (subc Lo0, multLo), (sube Hi0, multHi),
348// where,
349//  multHi/Lo: product of multiplication
350//  Lo0: initial value of Lo register
351//  Hi0: initial value of Hi register
352// Return true if pattern matching was successful.
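//
// An illustrative sketch, mirroring the MADD case above (placeholder values):
//
//   (subc $lo0, (smul_lohi $a, $b):0), (sube $hi0, (smul_lohi $a, $b):1)
//   =>
//   acc  = (InsertLOHI $lo0, $hi0)
//   msub = (MSub $a, $b, acc)            (MSubu for umul_lohi)
//   lo/hi are then taken from msub via ExtractLOHI, as in selectMADD.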
353static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) {
354  // SUBENode's second operand must be a flag output of an SUBC node in order
355  // for the matching to be successful.
356  SDNode *SUBCNode = SUBENode->getOperand(2).getNode();
357
358  if (SUBCNode->getOpcode() != ISD::SUBC)
359    return false;
360
361  SDValue MultHi = SUBENode->getOperand(1);
362  SDValue MultLo = SUBCNode->getOperand(1);
363  SDNode *MultNode = MultHi.getNode();
364  unsigned MultOpc = MultHi.getOpcode();
365
366  // MultHi and MultLo must be generated by the same node,
367  if (MultLo.getNode() != MultNode)
368    return false;
369
370  // and it must be a multiplication.
371  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
372    return false;
373
374  // MultLo and MultHi must be the first and second output of MultNode
375  // respectively.
376  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
377    return false;
378
379  // Transform this to a MSUB only if SUBENode and SUBCNode are the only users
380  // of the values of MultNode, in which case MultNode will be removed in later
381  // phases.
382  // If there are users other than SUBENode or SUBCNode, this function returns
383  // here, which results in MultNode being mapped to a single MULT
384  // instruction node rather than a pair of MULT and MSUB instructions being
385  // produced.
386  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
387    return false;
388
389  SDLoc DL(SUBENode);
390
391  // Initialize accumulator.
392  SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
393                                  SUBCNode->getOperand(0),
394                                  SUBENode->getOperand(0));
395
396  // create MipsMSub(u) node
397  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MSubu : MipsISD::MSub;
398
399  SDValue MSub = CurDAG->getNode(MultOpc, DL, MVT::Glue,
400                                 MultNode->getOperand(0),// Factor 0
401                                 MultNode->getOperand(1),// Factor 1
402                                 ACCIn);
403
404  // replace uses of sube and subc here
405  if (!SDValue(SUBCNode, 0).use_empty()) {
406    SDValue LoIdx = CurDAG->getConstant(Mips::sub_lo, MVT::i32);
407    SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub,
408                                    LoIdx);
409    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), LoOut);
410  }
411  if (!SDValue(SUBENode, 0).use_empty()) {
412    SDValue HiIdx = CurDAG->getConstant(Mips::sub_hi, MVT::i32);
413    SDValue HiOut = CurDAG->getNode(MipsISD::ExtractLOHI, DL, MVT::i32, MSub,
414                                    HiIdx);
415    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), HiOut);
416  }
417
418  return true;
419}
420
421static SDValue performADDECombine(SDNode *N, SelectionDAG &DAG,
422                                  TargetLowering::DAGCombinerInfo &DCI,
423                                  const MipsSubtarget *Subtarget) {
424  if (DCI.isBeforeLegalize())
425    return SDValue();
426
427  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
428      selectMADD(N, &DAG))
429    return SDValue(N, 0);
430
431  return SDValue();
432}
433
434// Fold zero extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT
435//
436// Performs the following transformations:
437// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to zero extension if its
438//   sign/zero-extension is completely overwritten by the new one performed by
439//   the ISD::AND.
440// - Removes redundant zero extensions performed by an ISD::AND.
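//
// For example (an illustrative sketch; $v and $idx are placeholders), with an
// extract whose extend type is i8 and a mask of 255 (2^8 - 1):
//
//   (and (VEXTRACT_SEXT_ELT $v, $idx, i8), 255)
//   -> (VEXTRACT_ZEXT_ELT $v, $idx, i8)
//
// because the AND zero-extends exactly the bits the extract already produced.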
441static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
442                                 TargetLowering::DAGCombinerInfo &DCI,
443                                 const MipsSubtarget *Subtarget) {
444  if (!Subtarget->hasMSA())
445    return SDValue();
446
447  SDValue Op0 = N->getOperand(0);
448  SDValue Op1 = N->getOperand(1);
449  unsigned Op0Opcode = Op0->getOpcode();
450
451  // (and (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d)
452  // where $d + 1 == 2^n and n == 32
453  // or    $d + 1 == 2^n and n <= 32 and ZExt
454  // -> (MipsVExtractZExt $a, $b, $c)
455  if (Op0Opcode == MipsISD::VEXTRACT_SEXT_ELT ||
456      Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT) {
457    ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(Op1);
458
459    if (!Mask)
460      return SDValue();
461
462    int32_t Log2IfPositive = (Mask->getAPIntValue() + 1).exactLogBase2();
463
464    if (Log2IfPositive <= 0)
465      return SDValue(); // Mask+1 is not a power of 2
466
467    SDValue Op0Op2 = Op0->getOperand(2);
468    EVT ExtendTy = cast<VTSDNode>(Op0Op2)->getVT();
469    unsigned ExtendTySize = ExtendTy.getSizeInBits();
470    unsigned Log2 = Log2IfPositive;
471
472    if ((Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT && Log2 >= ExtendTySize) ||
473        Log2 == ExtendTySize) {
474      SDValue Ops[] = { Op0->getOperand(0), Op0->getOperand(1), Op0Op2 };
475      DAG.MorphNodeTo(Op0.getNode(), MipsISD::VEXTRACT_ZEXT_ELT,
476                      Op0->getVTList(), Ops, Op0->getNumOperands());
477      return Op0;
478    }
479  }
480
481  return SDValue();
482}
483
484static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG,
485                                  TargetLowering::DAGCombinerInfo &DCI,
486                                  const MipsSubtarget *Subtarget) {
487  if (DCI.isBeforeLegalize())
488    return SDValue();
489
490  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
491      selectMSUB(N, &DAG))
492    return SDValue(N, 0);
493
494  return SDValue();
495}
496
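// genConstMult decomposes a multiplication by a constant into shifts, adds and
// subtractions. Two hand-worked examples (illustrative only):
//
//   C = 11: floor = 8, ceil = 16, 11 - 8 <= 16 - 11, so
//           11*x -> (add (shl x, 3), (add (shl x, 1), x))
//   C = 30: floor = 16, ceil = 32, 30 - 16 > 32 - 30, so
//           30*x -> (sub (shl x, 5), (shl x, 1))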
497static SDValue genConstMult(SDValue X, uint64_t C, SDLoc DL, EVT VT,
498                            EVT ShiftTy, SelectionDAG &DAG) {
499  // Clear the upper (64 - VT.sizeInBits) bits.
500  C &= ((uint64_t)-1) >> (64 - VT.getSizeInBits());
501
502  // Return 0.
503  if (C == 0)
504    return DAG.getConstant(0, VT);
505
506  // Return x.
507  if (C == 1)
508    return X;
509
510  // If c is a power of 2, return (shl x, log2(c)).
511  if (isPowerOf2_64(C))
512    return DAG.getNode(ISD::SHL, DL, VT, X,
513                       DAG.getConstant(Log2_64(C), ShiftTy));
514
515  unsigned Log2Ceil = Log2_64_Ceil(C);
516  uint64_t Floor = 1LL << Log2_64(C);
517  uint64_t Ceil = Log2Ceil == 64 ? 0LL : 1LL << Log2Ceil;
518
519  // If |c - floor_c| <= |c - ceil_c|,
520  // where floor_c = pow(2, floor(log2(c))) and ceil_c = pow(2, ceil(log2(c))),
521  // return (add constMult(x, floor_c), constMult(x, c - floor_c)).
522  if (C - Floor <= Ceil - C) {
523    SDValue Op0 = genConstMult(X, Floor, DL, VT, ShiftTy, DAG);
524    SDValue Op1 = genConstMult(X, C - Floor, DL, VT, ShiftTy, DAG);
525    return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
526  }
527
528  // If |c - floor_c| > |c - ceil_c|,
529  // return (sub constMult(x, ceil_c), constMult(x, ceil_c - c)).
530  SDValue Op0 = genConstMult(X, Ceil, DL, VT, ShiftTy, DAG);
531  SDValue Op1 = genConstMult(X, Ceil - C, DL, VT, ShiftTy, DAG);
532  return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
533}
534
535static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG,
536                                 const TargetLowering::DAGCombinerInfo &DCI,
537                                 const MipsSETargetLowering *TL) {
538  EVT VT = N->getValueType(0);
539
540  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
541    if (!VT.isVector())
542      return genConstMult(N->getOperand(0), C->getZExtValue(), SDLoc(N),
543                          VT, TL->getScalarShiftAmountTy(VT), DAG);
544
545  return SDValue(N, 0);
546}
547
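// A sketch of the DSP shift rewrite performed below (operands are
// placeholders): with Opc == MipsISD::SHLL_DSP,
//   (shl v2i16:$a, (build_vector 3, 3)) -> (SHLL_DSP $a, 3).
// Non-splat shift amounts, and splat amounts that are not smaller than the
// element size, are left untouched.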
548static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
549                                      SelectionDAG &DAG,
550                                      const MipsSubtarget *Subtarget) {
551  // See if this is a vector splat immediate node.
552  APInt SplatValue, SplatUndef;
553  unsigned SplatBitSize;
554  bool HasAnyUndefs;
555  unsigned EltSize = Ty.getVectorElementType().getSizeInBits();
556  BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
557
558  if (!BV ||
559      !BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
560                           EltSize, !Subtarget->isLittle()) ||
561      (SplatBitSize != EltSize) ||
562      (SplatValue.getZExtValue() >= EltSize))
563    return SDValue();
564
565  return DAG.getNode(Opc, SDLoc(N), Ty, N->getOperand(0),
566                     DAG.getConstant(SplatValue.getZExtValue(), MVT::i32));
567}
568
569static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
570                                 TargetLowering::DAGCombinerInfo &DCI,
571                                 const MipsSubtarget *Subtarget) {
572  EVT Ty = N->getValueType(0);
573
574  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
575    return SDValue();
576
577  return performDSPShiftCombine(MipsISD::SHLL_DSP, N, Ty, DAG, Subtarget);
578}
579
580// Fold sign-extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT for MSA and fold
581// constant splats into MipsISD::SHRA_DSP for DSPr2.
582//
583// Performs the following transformations:
584// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to sign extension if its
585//   sign/zero-extension is completely overwritten by the new one performed by
586//   the ISD::SRA and ISD::SHL nodes.
587// - Removes redundant sign extensions performed by an ISD::SRA and ISD::SHL
588//   sequence.
589//
590// See performDSPShiftCombine for more information about the transformation
591// used for DSPr2.
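//
// For example, in the MSA case (an illustrative sketch with placeholder
// operands), where the extract's extend type is i16:
//
//   (sra (shl (VEXTRACT_SEXT_ELT $v, $idx, i16), 16), 16)
//   -> (VEXTRACT_SEXT_ELT $v, $idx, i16)
//
// since the shl/sra pair redoes the sign-extension the extract already did.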
592static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
593                                 TargetLowering::DAGCombinerInfo &DCI,
594                                 const MipsSubtarget *Subtarget) {
595  EVT Ty = N->getValueType(0);
596
597  if (Subtarget->hasMSA()) {
598    SDValue Op0 = N->getOperand(0);
599    SDValue Op1 = N->getOperand(1);
600
601    // (sra (shl (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d), imm:$d)
602    // where $d + sizeof($c) == 32
603    // or    $d + sizeof($c) <= 32 and SExt
604    // -> (MipsVExtractSExt $a, $b, $c)
605    if (Op0->getOpcode() == ISD::SHL && Op1 == Op0->getOperand(1)) {
606      SDValue Op0Op0 = Op0->getOperand(0);
607      ConstantSDNode *ShAmount = dyn_cast<ConstantSDNode>(Op1);
608
609      if (!ShAmount)
610        return SDValue();
611
612      EVT ExtendTy = cast<VTSDNode>(Op0Op0->getOperand(2))->getVT();
613      unsigned TotalBits = ShAmount->getZExtValue() + ExtendTy.getSizeInBits();
614
615      if (TotalBits == 32 ||
616          (Op0Op0->getOpcode() == MipsISD::VEXTRACT_SEXT_ELT &&
617           TotalBits <= 32)) {
618        SDValue Ops[] = { Op0Op0->getOperand(0), Op0Op0->getOperand(1),
619                          Op0Op0->getOperand(2) };
620        DAG.MorphNodeTo(Op0Op0.getNode(), MipsISD::VEXTRACT_SEXT_ELT,
621                        Op0Op0->getVTList(), Ops, Op0Op0->getNumOperands());
622        return Op0Op0;
623      }
624    }
625  }
626
627  if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget->hasDSPR2()))
628    return SDValue();
629
630  return performDSPShiftCombine(MipsISD::SHRA_DSP, N, Ty, DAG, Subtarget);
631}
632
633
634static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG,
635                                 TargetLowering::DAGCombinerInfo &DCI,
636                                 const MipsSubtarget *Subtarget) {
637  EVT Ty = N->getValueType(0);
638
639  if (((Ty != MVT::v2i16) || !Subtarget->hasDSPR2()) && (Ty != MVT::v4i8))
640    return SDValue();
641
642  return performDSPShiftCombine(MipsISD::SHRL_DSP, N, Ty, DAG, Subtarget);
643}
644
645static bool isLegalDSPCondCode(EVT Ty, ISD::CondCode CC) {
646  bool IsV216 = (Ty == MVT::v2i16);
647
648  switch (CC) {
649  case ISD::SETEQ:
650  case ISD::SETNE:  return true;
651  case ISD::SETLT:
652  case ISD::SETLE:
653  case ISD::SETGT:
654  case ISD::SETGE:  return IsV216;
655  case ISD::SETULT:
656  case ISD::SETULE:
657  case ISD::SETUGT:
658  case ISD::SETUGE: return !IsV216;
659  default:          return false;
660  }
661}
662
663static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
664  EVT Ty = N->getValueType(0);
665
666  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
667    return SDValue();
668
669  if (!isLegalDSPCondCode(Ty, cast<CondCodeSDNode>(N->getOperand(2))->get()))
670    return SDValue();
671
672  return DAG.getNode(MipsISD::SETCC_DSP, SDLoc(N), Ty, N->getOperand(0),
673                     N->getOperand(1), N->getOperand(2));
674}
675
676static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) {
677  EVT Ty = N->getValueType(0);
678
679  if (Ty.is128BitVector() && Ty.isInteger()) {
680    // Try the following combines:
681    //   (vselect (setcc $a, $b, SETLT), $b, $a) -> (vsmax $a, $b)
682    //   (vselect (setcc $a, $b, SETLE), $b, $a) -> (vsmax $a, $b)
683    //   (vselect (setcc $a, $b, SETLT), $a, $b) -> (vsmin $a, $b)
684    //   (vselect (setcc $a, $b, SETLE), $a, $b) -> (vsmin $a, $b)
685    //   (vselect (setcc $a, $b, SETULT), $b, $a) -> (vumax $a, $b)
686    //   (vselect (setcc $a, $b, SETULE), $b, $a) -> (vumax $a, $b)
687    //   (vselect (setcc $a, $b, SETULT), $a, $b) -> (vumin $a, $b)
688    //   (vselect (setcc $a, $b, SETULE), $a, $b) -> (vumin $a, $b)
689    // SETGT/SETGE/SETUGT/SETUGE variants of these will show up initially but
690    // will be expanded to equivalent SETLT/SETLE/SETULT/SETULE versions by the
691    // legalizer.
692    SDValue Op0 = N->getOperand(0);
693
694    if (Op0->getOpcode() != ISD::SETCC)
695      return SDValue();
696
697    ISD::CondCode CondCode = cast<CondCodeSDNode>(Op0->getOperand(2))->get();
698    bool Signed;
699
700    if (CondCode == ISD::SETLT  || CondCode == ISD::SETLE)
701      Signed = true;
702    else if (CondCode == ISD::SETULT || CondCode == ISD::SETULE)
703      Signed = false;
704    else
705      return SDValue();
706
707    SDValue Op1 = N->getOperand(1);
708    SDValue Op2 = N->getOperand(2);
709    SDValue Op0Op0 = Op0->getOperand(0);
710    SDValue Op0Op1 = Op0->getOperand(1);
711
712    if (Op1 == Op0Op0 && Op2 == Op0Op1)
713      return DAG.getNode(Signed ? MipsISD::VSMIN : MipsISD::VUMIN, SDLoc(N),
714                         Ty, Op1, Op2);
715    else if (Op1 == Op0Op1 && Op2 == Op0Op0)
716      return DAG.getNode(Signed ? MipsISD::VSMAX : MipsISD::VUMAX, SDLoc(N),
717                         Ty, Op1, Op2);
718  } else if ((Ty == MVT::v2i16) || (Ty == MVT::v4i8)) {
719    SDValue SetCC = N->getOperand(0);
720
721    if (SetCC.getOpcode() != MipsISD::SETCC_DSP)
722      return SDValue();
723
724    return DAG.getNode(MipsISD::SELECT_CC_DSP, SDLoc(N), Ty,
725                       SetCC.getOperand(0), SetCC.getOperand(1),
726                       N->getOperand(1), N->getOperand(2), SetCC.getOperand(2));
727  }
728
729  return SDValue();
730}
731
732static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
733                                 const MipsSubtarget *Subtarget) {
734  EVT Ty = N->getValueType(0);
735
736  if (Subtarget->hasMSA() && Ty.is128BitVector() && Ty.isInteger()) {
737    // Try the following combines:
738    //   (xor (or $a, $b), (build_vector allones))
739    //   (xor (or $a, $b), (bitcast (build_vector allones)))
740    SDValue Op0 = N->getOperand(0);
741    SDValue Op1 = N->getOperand(1);
742    SDValue NotOp;
743
744    if (ISD::isBuildVectorAllOnes(Op0.getNode()))
745      NotOp = Op1;
746    else if (ISD::isBuildVectorAllOnes(Op1.getNode()))
747      NotOp = Op0;
748    else
749      return SDValue();
750
751    if (NotOp->getOpcode() == ISD::OR)
752      return DAG.getNode(MipsISD::VNOR, SDLoc(N), Ty, NotOp->getOperand(0),
753                         NotOp->getOperand(1));
754  }
755
756  return SDValue();
757}
758
759SDValue
760MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
761  SelectionDAG &DAG = DCI.DAG;
762  SDValue Val;
763
764  switch (N->getOpcode()) {
765  case ISD::ADDE:
766    return performADDECombine(N, DAG, DCI, Subtarget);
767  case ISD::AND:
768    Val = performANDCombine(N, DAG, DCI, Subtarget);
769    break;
770  case ISD::SUBE:
771    return performSUBECombine(N, DAG, DCI, Subtarget);
772  case ISD::MUL:
773    return performMULCombine(N, DAG, DCI, this);
774  case ISD::SHL:
775    return performSHLCombine(N, DAG, DCI, Subtarget);
776  case ISD::SRA:
777    return performSRACombine(N, DAG, DCI, Subtarget);
778  case ISD::SRL:
779    return performSRLCombine(N, DAG, DCI, Subtarget);
780  case ISD::VSELECT:
781    return performVSELECTCombine(N, DAG);
782  case ISD::XOR:
783    Val = performXORCombine(N, DAG, Subtarget);
784    break;
785  case ISD::SETCC:
786    Val = performSETCCCombine(N, DAG);
787    break;
788  }
789
790  if (Val.getNode())
791    return Val;
792
793  return MipsTargetLowering::PerformDAGCombine(N, DCI);
794}
795
796MachineBasicBlock *
797MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
798                                                  MachineBasicBlock *BB) const {
799  switch (MI->getOpcode()) {
800  default:
801    return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
802  case Mips::BPOSGE32_PSEUDO:
803    return emitBPOSGE32(MI, BB);
804  case Mips::SNZ_B_PSEUDO:
805    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_B);
806  case Mips::SNZ_H_PSEUDO:
807    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_H);
808  case Mips::SNZ_W_PSEUDO:
809    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_W);
810  case Mips::SNZ_D_PSEUDO:
811    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_D);
812  case Mips::SNZ_V_PSEUDO:
813    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_V);
814  case Mips::SZ_B_PSEUDO:
815    return emitMSACBranchPseudo(MI, BB, Mips::BZ_B);
816  case Mips::SZ_H_PSEUDO:
817    return emitMSACBranchPseudo(MI, BB, Mips::BZ_H);
818  case Mips::SZ_W_PSEUDO:
819    return emitMSACBranchPseudo(MI, BB, Mips::BZ_W);
820  case Mips::SZ_D_PSEUDO:
821    return emitMSACBranchPseudo(MI, BB, Mips::BZ_D);
822  case Mips::SZ_V_PSEUDO:
823    return emitMSACBranchPseudo(MI, BB, Mips::BZ_V);
824  }
825}
826
827bool MipsSETargetLowering::
828isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
829                                  unsigned NextStackOffset,
830                                  const MipsFunctionInfo& FI) const {
831  if (!EnableMipsTailCalls)
832    return false;
833
834  // Return false if either the callee or caller has a byval argument.
835  if (MipsCCInfo.hasByValArg() || FI.hasByvalArg())
836    return false;
837
838  // Return true if the callee's argument area is no larger than the
839  // caller's.
840  return NextStackOffset <= FI.getIncomingArgSize();
841}
842
843void MipsSETargetLowering::
844getOpndList(SmallVectorImpl<SDValue> &Ops,
845            std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
846            bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
847            CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const {
848  // T9 should contain the address of the callee function if
849  // -relocation-model=pic or the call is indirect.
850  if (IsPICCall || !GlobalOrExternal) {
851    unsigned T9Reg = IsN64 ? Mips::T9_64 : Mips::T9;
852    RegsToPass.push_front(std::make_pair(T9Reg, Callee));
853  } else
854    Ops.push_back(Callee);
855
856  MipsTargetLowering::getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal,
857                                  InternalLinkage, CLI, Callee, Chain);
858}
859
860SDValue MipsSETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
861  LoadSDNode &Nd = *cast<LoadSDNode>(Op);
862
863  if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
864    return MipsTargetLowering::lowerLOAD(Op, DAG);
865
866  // Replace a double-precision load with two i32 loads and a BuildPairF64.
867  SDLoc DL(Op);
868  SDValue Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
869  EVT PtrVT = Ptr.getValueType();
870
871  // i32 load from lower address.
872  SDValue Lo = DAG.getLoad(MVT::i32, DL, Chain, Ptr,
873                           MachinePointerInfo(), Nd.isVolatile(),
874                           Nd.isNonTemporal(), Nd.isInvariant(),
875                           Nd.getAlignment());
876
877  // i32 load from higher address.
878  Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
879  SDValue Hi = DAG.getLoad(MVT::i32, DL, Lo.getValue(1), Ptr,
880                           MachinePointerInfo(), Nd.isVolatile(),
881                           Nd.isNonTemporal(), Nd.isInvariant(),
882                           std::min(Nd.getAlignment(), 4U));
883
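  // On a big-endian target the word at the lower address holds the high half
  // of the f64, so swap the two halves before building the pair.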
884  if (!Subtarget->isLittle())
885    std::swap(Lo, Hi);
886
887  SDValue BP = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
888  SDValue Ops[2] = {BP, Hi.getValue(1)};
889  return DAG.getMergeValues(Ops, 2, DL);
890}
891
892SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
893  StoreSDNode &Nd = *cast<StoreSDNode>(Op);
894
895  if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
896    return MipsTargetLowering::lowerSTORE(Op, DAG);
897
898  // Replace a double-precision store with two ExtractElementF64 nodes and two i32 stores.
899  SDLoc DL(Op);
900  SDValue Val = Nd.getValue(), Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
901  EVT PtrVT = Ptr.getValueType();
902  SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
903                           Val, DAG.getConstant(0, MVT::i32));
904  SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
905                           Val, DAG.getConstant(1, MVT::i32));
906
907  if (!Subtarget->isLittle())
908    std::swap(Lo, Hi);
909
910  // i32 store to lower address.
911  Chain = DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(),
912                       Nd.isVolatile(), Nd.isNonTemporal(), Nd.getAlignment(),
913                       Nd.getTBAAInfo());
914
915  // i32 store to higher address.
916  Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
917  return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(),
918                      Nd.isVolatile(), Nd.isNonTemporal(),
919                      std::min(Nd.getAlignment(), 4U), Nd.getTBAAInfo());
920}
921
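// A sketch of the lowering done by lowerMulDiv for i32 (placeholder operands;
// MULHS is shown, the other opcodes follow the same shape):
//
//   (mulhs $a, $b)
//   =>
//   acc = (Mult $a, $b)                 untyped HI/LO accumulator
//   hi  = (ExtractLOHI acc, sub_hi)     only the requested half is extracted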
922SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
923                                          bool HasLo, bool HasHi,
924                                          SelectionDAG &DAG) const {
925  EVT Ty = Op.getOperand(0).getValueType();
926  SDLoc DL(Op);
927  SDValue Mult = DAG.getNode(NewOpc, DL, MVT::Untyped,
928                             Op.getOperand(0), Op.getOperand(1));
929  SDValue Lo, Hi;
930
931  if (HasLo)
932    Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult,
933                     DAG.getConstant(Mips::sub_lo, MVT::i32));
934  if (HasHi)
935    Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, Ty, Mult,
936                     DAG.getConstant(Mips::sub_hi, MVT::i32));
937
938  if (!HasLo || !HasHi)
939    return HasLo ? Lo : Hi;
940
941  SDValue Vals[] = { Lo, Hi };
942  return DAG.getMergeValues(Vals, 2, DL);
943}
944
945
946static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) {
947  SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
948                             DAG.getConstant(0, MVT::i32));
949  SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
950                             DAG.getConstant(1, MVT::i32));
951  return DAG.getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, InLo, InHi);
952}
953
954static SDValue extractLOHI(SDValue Op, SDLoc DL, SelectionDAG &DAG) {
955  SDValue Lo = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op,
956                           DAG.getConstant(Mips::sub_lo, MVT::i32));
957  SDValue Hi = DAG.getNode(MipsISD::ExtractLOHI, DL, MVT::i32, Op,
958                           DAG.getConstant(Mips::sub_hi, MVT::i32));
959  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
960}
961
962// This function expands mips intrinsic nodes which have 64-bit input operands
963// or output values.
964//
965// out64 = intrinsic-node in64
966// =>
967// lo = copy (extract-element (in64, 0))
968// hi = copy (extract-element (in64, 1))
969// mips-specific-node
970// v0 = copy lo
971// v1 = copy hi
972// out64 = merge-values (v0, v1)
973//
974static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
975  SDLoc DL(Op);
976  bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other;
977  SmallVector<SDValue, 3> Ops;
978  unsigned OpNo = 0;
979
980  // See if Op has a chain input.
981  if (HasChainIn)
982    Ops.push_back(Op->getOperand(OpNo++));
983
984  // The next operand is the intrinsic opcode.
985  assert(Op->getOperand(OpNo).getOpcode() == ISD::TargetConstant);
986
987  // See if the next operand has type i64.
988  SDValue Opnd = Op->getOperand(++OpNo), In64;
989
990  if (Opnd.getValueType() == MVT::i64)
991    In64 = initAccumulator(Opnd, DL, DAG);
992  else
993    Ops.push_back(Opnd);
994
995  // Push the remaining operands.
996  for (++OpNo ; OpNo < Op->getNumOperands(); ++OpNo)
997    Ops.push_back(Op->getOperand(OpNo));
998
999  // Add In64 to the end of the list.
1000  if (In64.getNode())
1001    Ops.push_back(In64);
1002
1003  // Scan output.
1004  SmallVector<EVT, 2> ResTys;
1005
1006  for (SDNode::value_iterator I = Op->value_begin(), E = Op->value_end();
1007       I != E; ++I)
1008    ResTys.push_back((*I == MVT::i64) ? MVT::Untyped : *I);
1009
1010  // Create node.
1011  SDValue Val = DAG.getNode(Opc, DL, ResTys, &Ops[0], Ops.size());
1012  SDValue Out = (ResTys[0] == MVT::Untyped) ? extractLOHI(Val, DL, DAG) : Val;
1013
1014  if (!HasChainIn)
1015    return Out;
1016
1017  assert(Val->getValueType(1) == MVT::Other);
1018  SDValue Vals[] = { Out, SDValue(Val.getNode(), 1) };
1019  return DAG.getMergeValues(Vals, 2, DL);
1020}
1021
1022static SDValue lowerMSABinaryIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1023  SDLoc DL(Op);
1024  SDValue LHS = Op->getOperand(1);
1025  SDValue RHS = Op->getOperand(2);
1026  EVT ResTy = Op->getValueType(0);
1027
1028  SDValue Result = DAG.getNode(Opc, DL, ResTy, LHS, RHS);
1029
1030  return Result;
1031}
1032
1033static SDValue lowerMSABinaryImmIntr(SDValue Op, SelectionDAG &DAG,
1034                                     unsigned Opc, SDValue RHS) {
1035  SDValue LHS = Op->getOperand(1);
1036  EVT ResTy = Op->getValueType(0);
1037
1038  return DAG.getNode(Opc, SDLoc(Op), ResTy, LHS, RHS);
1039}
1040
1041static SDValue lowerMSABranchIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1042  SDLoc DL(Op);
1043  SDValue Value = Op->getOperand(1);
1044  EVT ResTy = Op->getValueType(0);
1045
1046  SDValue Result = DAG.getNode(Opc, DL, ResTy, Value);
1047
1048  return Result;
1049}
1050
1051// Lower an MSA copy intrinsic into the specified SelectionDAG node
1052static SDValue lowerMSACopyIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1053  SDLoc DL(Op);
1054  SDValue Vec = Op->getOperand(1);
1055  SDValue Idx = Op->getOperand(2);
1056  EVT ResTy = Op->getValueType(0);
1057  EVT EltTy = Vec->getValueType(0).getVectorElementType();
1058
1059  SDValue Result = DAG.getNode(Opc, DL, ResTy, Vec, Idx,
1060                               DAG.getValueType(EltTy));
1061
1062  return Result;
1063}
1064
1065// Lower an MSA insert intrinsic into the specified SelectionDAG node
1066static SDValue lowerMSAInsertIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1067  SDLoc DL(Op);
1068  SDValue Op0 = Op->getOperand(1);
1069  SDValue Op1 = Op->getOperand(2);
1070  SDValue Op2 = Op->getOperand(3);
1071  EVT ResTy = Op->getValueType(0);
1072
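  // The mips_insert_* intrinsics supply (vector, index, value), while
  // ISD::INSERT_VECTOR_ELT expects (vector, value, index), hence the operand
  // reordering below.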
1073  SDValue Result = DAG.getNode(Opc, DL, ResTy, Op0, Op2, Op1);
1074
1075  return Result;
1076}
1077
1078static SDValue lowerMSASplatImm(SDValue Op, SDValue ImmOp, SelectionDAG &DAG) {
1079  EVT ResTy = Op->getValueType(0);
1080  EVT ViaVecTy = ResTy;
1081  SmallVector<SDValue, 16> Ops;
1082  SDValue ImmHiOp;
1083  SDLoc DL(Op);
1084
1085  if (ViaVecTy == MVT::v2i64) {
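  // There is no 64-bit immediate operand here, so a v2i64 splat is built as a
  // v4i32 BUILD_VECTOR of the 32-bit immediate and its sign bits, then bitcast
  // back to the result type.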
1086    ImmHiOp = DAG.getNode(ISD::SRA, DL, MVT::i32, ImmOp,
1087                          DAG.getConstant(31, MVT::i32));
1088    for (unsigned i = 0; i < ViaVecTy.getVectorNumElements(); ++i) {
1089      Ops.push_back(ImmHiOp);
1090      Ops.push_back(ImmOp);
1091    }
1092    ViaVecTy = MVT::v4i32;
1093  } else {
1094    for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i)
1095      Ops.push_back(ImmOp);
1096  }
1097
1098  SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, &Ops[0],
1099                               Ops.size());
1100
1101  if (ResTy != ViaVecTy)
1102    Result = DAG.getNode(ISD::BITCAST, DL, ResTy, Result);
1103
1104  return Result;
1105}
1106
1107static SDValue
1108lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG) {
1109  return lowerMSASplatImm(Op, Op->getOperand(ImmOp), DAG);
1110}
1111
1112static SDValue lowerMSAUnaryIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1113  SDLoc DL(Op);
1114  SDValue Value = Op->getOperand(1);
1115  EVT ResTy = Op->getValueType(0);
1116
1117  SDValue Result = DAG.getNode(Opc, DL, ResTy, Value);
1118
1119  return Result;
1120}
1121
1122SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
1123                                                      SelectionDAG &DAG) const {
1124  switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) {
1125  default:
1126    return SDValue();
1127  case Intrinsic::mips_shilo:
1128    return lowerDSPIntr(Op, DAG, MipsISD::SHILO);
1129  case Intrinsic::mips_dpau_h_qbl:
1130    return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBL);
1131  case Intrinsic::mips_dpau_h_qbr:
1132    return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBR);
1133  case Intrinsic::mips_dpsu_h_qbl:
1134    return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBL);
1135  case Intrinsic::mips_dpsu_h_qbr:
1136    return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBR);
1137  case Intrinsic::mips_dpa_w_ph:
1138    return lowerDSPIntr(Op, DAG, MipsISD::DPA_W_PH);
1139  case Intrinsic::mips_dps_w_ph:
1140    return lowerDSPIntr(Op, DAG, MipsISD::DPS_W_PH);
1141  case Intrinsic::mips_dpax_w_ph:
1142    return lowerDSPIntr(Op, DAG, MipsISD::DPAX_W_PH);
1143  case Intrinsic::mips_dpsx_w_ph:
1144    return lowerDSPIntr(Op, DAG, MipsISD::DPSX_W_PH);
1145  case Intrinsic::mips_mulsa_w_ph:
1146    return lowerDSPIntr(Op, DAG, MipsISD::MULSA_W_PH);
1147  case Intrinsic::mips_mult:
1148    return lowerDSPIntr(Op, DAG, MipsISD::Mult);
1149  case Intrinsic::mips_multu:
1150    return lowerDSPIntr(Op, DAG, MipsISD::Multu);
1151  case Intrinsic::mips_madd:
1152    return lowerDSPIntr(Op, DAG, MipsISD::MAdd);
1153  case Intrinsic::mips_maddu:
1154    return lowerDSPIntr(Op, DAG, MipsISD::MAddu);
1155  case Intrinsic::mips_msub:
1156    return lowerDSPIntr(Op, DAG, MipsISD::MSub);
1157  case Intrinsic::mips_msubu:
1158    return lowerDSPIntr(Op, DAG, MipsISD::MSubu);
1159  case Intrinsic::mips_addv_b:
1160  case Intrinsic::mips_addv_h:
1161  case Intrinsic::mips_addv_w:
1162  case Intrinsic::mips_addv_d:
1163    return lowerMSABinaryIntr(Op, DAG, ISD::ADD);
1164  case Intrinsic::mips_addvi_b:
1165  case Intrinsic::mips_addvi_h:
1166  case Intrinsic::mips_addvi_w:
1167  case Intrinsic::mips_addvi_d:
1168    return lowerMSABinaryImmIntr(Op, DAG, ISD::ADD,
1169                                 lowerMSASplatImm(Op, 2, DAG));
1170  case Intrinsic::mips_and_v:
1171    return lowerMSABinaryIntr(Op, DAG, ISD::AND);
1172  case Intrinsic::mips_andi_b:
1173    return lowerMSABinaryImmIntr(Op, DAG, ISD::AND,
1174                                 lowerMSASplatImm(Op, 2, DAG));
1175  case Intrinsic::mips_bnz_b:
1176  case Intrinsic::mips_bnz_h:
1177  case Intrinsic::mips_bnz_w:
1178  case Intrinsic::mips_bnz_d:
1179    return lowerMSABranchIntr(Op, DAG, MipsISD::VALL_NONZERO);
1180  case Intrinsic::mips_bnz_v:
1181    return lowerMSABranchIntr(Op, DAG, MipsISD::VANY_NONZERO);
1182  case Intrinsic::mips_bsel_v:
1183    return DAG.getNode(ISD::VSELECT, SDLoc(Op), Op->getValueType(0),
1184                       Op->getOperand(1), Op->getOperand(2),
1185                       Op->getOperand(3));
1186  case Intrinsic::mips_bseli_b:
1187    return DAG.getNode(ISD::VSELECT, SDLoc(Op), Op->getValueType(0),
1188                       Op->getOperand(1), Op->getOperand(2),
1189                       lowerMSASplatImm(Op, 3, DAG));
1190  case Intrinsic::mips_bz_b:
1191  case Intrinsic::mips_bz_h:
1192  case Intrinsic::mips_bz_w:
1193  case Intrinsic::mips_bz_d:
1194    return lowerMSABranchIntr(Op, DAG, MipsISD::VALL_ZERO);
1195  case Intrinsic::mips_bz_v:
1196    return lowerMSABranchIntr(Op, DAG, MipsISD::VANY_ZERO);
1197  case Intrinsic::mips_ceq_b:
1198  case Intrinsic::mips_ceq_h:
1199  case Intrinsic::mips_ceq_w:
1200  case Intrinsic::mips_ceq_d:
1201    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1202                        Op->getOperand(2), ISD::SETEQ);
1203  case Intrinsic::mips_ceqi_b:
1204  case Intrinsic::mips_ceqi_h:
1205  case Intrinsic::mips_ceqi_w:
1206  case Intrinsic::mips_ceqi_d:
1207    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1208                        lowerMSASplatImm(Op, 2, DAG), ISD::SETEQ);
1209  case Intrinsic::mips_cle_s_b:
1210  case Intrinsic::mips_cle_s_h:
1211  case Intrinsic::mips_cle_s_w:
1212  case Intrinsic::mips_cle_s_d:
1213    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1214                        Op->getOperand(2), ISD::SETLE);
1215  case Intrinsic::mips_clei_s_b:
1216  case Intrinsic::mips_clei_s_h:
1217  case Intrinsic::mips_clei_s_w:
1218  case Intrinsic::mips_clei_s_d:
1219    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1220                        lowerMSASplatImm(Op, 2, DAG), ISD::SETLE);
1221  case Intrinsic::mips_cle_u_b:
1222  case Intrinsic::mips_cle_u_h:
1223  case Intrinsic::mips_cle_u_w:
1224  case Intrinsic::mips_cle_u_d:
1225    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1226                        Op->getOperand(2), ISD::SETULE);
1227  case Intrinsic::mips_clei_u_b:
1228  case Intrinsic::mips_clei_u_h:
1229  case Intrinsic::mips_clei_u_w:
1230  case Intrinsic::mips_clei_u_d:
1231    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1232                        lowerMSASplatImm(Op, 2, DAG), ISD::SETULE);
1233  case Intrinsic::mips_clt_s_b:
1234  case Intrinsic::mips_clt_s_h:
1235  case Intrinsic::mips_clt_s_w:
1236  case Intrinsic::mips_clt_s_d:
1237    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1238                        Op->getOperand(2), ISD::SETLT);
1239  case Intrinsic::mips_clti_s_b:
1240  case Intrinsic::mips_clti_s_h:
1241  case Intrinsic::mips_clti_s_w:
1242  case Intrinsic::mips_clti_s_d:
1243    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1244                        lowerMSASplatImm(Op, 2, DAG), ISD::SETLT);
1245  case Intrinsic::mips_clt_u_b:
1246  case Intrinsic::mips_clt_u_h:
1247  case Intrinsic::mips_clt_u_w:
1248  case Intrinsic::mips_clt_u_d:
1249    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1250                        Op->getOperand(2), ISD::SETULT);
1251  case Intrinsic::mips_clti_u_b:
1252  case Intrinsic::mips_clti_u_h:
1253  case Intrinsic::mips_clti_u_w:
1254  case Intrinsic::mips_clti_u_d:
1255    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1256                        lowerMSASplatImm(Op, 2, DAG), ISD::SETULT);
1257  case Intrinsic::mips_copy_s_b:
1258  case Intrinsic::mips_copy_s_h:
1259  case Intrinsic::mips_copy_s_w:
1260    return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT);
1261  case Intrinsic::mips_copy_u_b:
1262  case Intrinsic::mips_copy_u_h:
1263  case Intrinsic::mips_copy_u_w:
1264    return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT);
1265  case Intrinsic::mips_div_s_b:
1266  case Intrinsic::mips_div_s_h:
1267  case Intrinsic::mips_div_s_w:
1268  case Intrinsic::mips_div_s_d:
1269    return lowerMSABinaryIntr(Op, DAG, ISD::SDIV);
1270  case Intrinsic::mips_div_u_b:
1271  case Intrinsic::mips_div_u_h:
1272  case Intrinsic::mips_div_u_w:
1273  case Intrinsic::mips_div_u_d:
1274    return lowerMSABinaryIntr(Op, DAG, ISD::UDIV);
1275  case Intrinsic::mips_fadd_w:
1276  case Intrinsic::mips_fadd_d:
1277    return lowerMSABinaryIntr(Op, DAG, ISD::FADD);
1278  // Don't lower mips_fcaf_[wd] since LLVM folds SETFALSE condcodes away
1279  case Intrinsic::mips_fceq_w:
1280  case Intrinsic::mips_fceq_d:
1281    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1282                        Op->getOperand(2), ISD::SETOEQ);
1283  case Intrinsic::mips_fcle_w:
1284  case Intrinsic::mips_fcle_d:
1285    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1286                        Op->getOperand(2), ISD::SETOLE);
1287  case Intrinsic::mips_fclt_w:
1288  case Intrinsic::mips_fclt_d:
1289    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1290                        Op->getOperand(2), ISD::SETOLT);
1291  case Intrinsic::mips_fcne_w:
1292  case Intrinsic::mips_fcne_d:
1293    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1294                        Op->getOperand(2), ISD::SETONE);
1295  case Intrinsic::mips_fcor_w:
1296  case Intrinsic::mips_fcor_d:
1297    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1298                        Op->getOperand(2), ISD::SETO);
1299  case Intrinsic::mips_fcueq_w:
1300  case Intrinsic::mips_fcueq_d:
1301    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1302                        Op->getOperand(2), ISD::SETUEQ);
1303  case Intrinsic::mips_fcule_w:
1304  case Intrinsic::mips_fcule_d:
1305    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1306                        Op->getOperand(2), ISD::SETULE);
1307  case Intrinsic::mips_fcult_w:
1308  case Intrinsic::mips_fcult_d:
1309    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1310                        Op->getOperand(2), ISD::SETULT);
1311  case Intrinsic::mips_fcun_w:
1312  case Intrinsic::mips_fcun_d:
1313    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1314                        Op->getOperand(2), ISD::SETUO);
1315  case Intrinsic::mips_fcune_w:
1316  case Intrinsic::mips_fcune_d:
1317    return DAG.getSetCC(SDLoc(Op), Op->getValueType(0), Op->getOperand(1),
1318                        Op->getOperand(2), ISD::SETUNE);
1319  case Intrinsic::mips_fdiv_w:
1320  case Intrinsic::mips_fdiv_d:
1321    return lowerMSABinaryIntr(Op, DAG, ISD::FDIV);
1322  case Intrinsic::mips_fill_b:
1323  case Intrinsic::mips_fill_h:
1324  case Intrinsic::mips_fill_w: {
1325    SmallVector<SDValue, 16> Ops;
1326    EVT ResTy = Op->getValueType(0);
1327
1328    for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i)
1329      Ops.push_back(Op->getOperand(1));
1330
1331    return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), ResTy, &Ops[0],
1332                       Ops.size());
1333  }
1334  case Intrinsic::mips_flog2_w:
1335  case Intrinsic::mips_flog2_d:
1336    return lowerMSAUnaryIntr(Op, DAG, ISD::FLOG2);
1337  case Intrinsic::mips_fmul_w:
1338  case Intrinsic::mips_fmul_d:
1339    return lowerMSABinaryIntr(Op, DAG, ISD::FMUL);
1340  case Intrinsic::mips_frint_w:
1341  case Intrinsic::mips_frint_d:
1342    return lowerMSAUnaryIntr(Op, DAG, ISD::FRINT);
1343  case Intrinsic::mips_fsqrt_w:
1344  case Intrinsic::mips_fsqrt_d:
1345    return lowerMSAUnaryIntr(Op, DAG, ISD::FSQRT);
1346  case Intrinsic::mips_fsub_w:
1347  case Intrinsic::mips_fsub_d:
1348    return lowerMSABinaryIntr(Op, DAG, ISD::FSUB);
1349  case Intrinsic::mips_ilvev_b:
1350  case Intrinsic::mips_ilvev_h:
1351  case Intrinsic::mips_ilvev_w:
1352  case Intrinsic::mips_ilvev_d:
1353    return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), Op->getValueType(0),
1354                       Op->getOperand(1), Op->getOperand(2));
1355  case Intrinsic::mips_ilvl_b:
1356  case Intrinsic::mips_ilvl_h:
1357  case Intrinsic::mips_ilvl_w:
1358  case Intrinsic::mips_ilvl_d:
1359    return DAG.getNode(MipsISD::ILVL, SDLoc(Op), Op->getValueType(0),
1360                       Op->getOperand(1), Op->getOperand(2));
1361  case Intrinsic::mips_ilvod_b:
1362  case Intrinsic::mips_ilvod_h:
1363  case Intrinsic::mips_ilvod_w:
1364  case Intrinsic::mips_ilvod_d:
1365    return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), Op->getValueType(0),
1366                       Op->getOperand(1), Op->getOperand(2));
1367  case Intrinsic::mips_ilvr_b:
1368  case Intrinsic::mips_ilvr_h:
1369  case Intrinsic::mips_ilvr_w:
1370  case Intrinsic::mips_ilvr_d:
1371    return DAG.getNode(MipsISD::ILVR, SDLoc(Op), Op->getValueType(0),
1372                       Op->getOperand(1), Op->getOperand(2));
1373  case Intrinsic::mips_insert_b:
1374  case Intrinsic::mips_insert_h:
1375  case Intrinsic::mips_insert_w:
1376    return lowerMSAInsertIntr(Op, DAG, ISD::INSERT_VECTOR_ELT);
1377  case Intrinsic::mips_ldi_b:
1378  case Intrinsic::mips_ldi_h:
1379  case Intrinsic::mips_ldi_w:
1380  case Intrinsic::mips_ldi_d:
1381    return lowerMSASplatImm(Op, 1, DAG);
1382  case Intrinsic::mips_max_s_b:
1383  case Intrinsic::mips_max_s_h:
1384  case Intrinsic::mips_max_s_w:
1385  case Intrinsic::mips_max_s_d:
1386    return lowerMSABinaryIntr(Op, DAG, MipsISD::VSMAX);
1387  case Intrinsic::mips_max_u_b:
1388  case Intrinsic::mips_max_u_h:
1389  case Intrinsic::mips_max_u_w:
1390  case Intrinsic::mips_max_u_d:
1391    return lowerMSABinaryIntr(Op, DAG, MipsISD::VUMAX);
1392  case Intrinsic::mips_maxi_s_b:
1393  case Intrinsic::mips_maxi_s_h:
1394  case Intrinsic::mips_maxi_s_w:
1395  case Intrinsic::mips_maxi_s_d:
1396    return lowerMSABinaryImmIntr(Op, DAG, MipsISD::VSMAX,
1397                                 lowerMSASplatImm(Op, 2, DAG));
1398  case Intrinsic::mips_maxi_u_b:
1399  case Intrinsic::mips_maxi_u_h:
1400  case Intrinsic::mips_maxi_u_w:
1401  case Intrinsic::mips_maxi_u_d:
1402    return lowerMSABinaryImmIntr(Op, DAG, MipsISD::VUMAX,
1403                                 lowerMSASplatImm(Op, 2, DAG));
1404  case Intrinsic::mips_min_s_b:
1405  case Intrinsic::mips_min_s_h:
1406  case Intrinsic::mips_min_s_w:
1407  case Intrinsic::mips_min_s_d:
1408    return lowerMSABinaryIntr(Op, DAG, MipsISD::VSMIN);
1409  case Intrinsic::mips_min_u_b:
1410  case Intrinsic::mips_min_u_h:
1411  case Intrinsic::mips_min_u_w:
1412  case Intrinsic::mips_min_u_d:
1413    return lowerMSABinaryIntr(Op, DAG, MipsISD::VUMIN);
1414  case Intrinsic::mips_mini_s_b:
1415  case Intrinsic::mips_mini_s_h:
1416  case Intrinsic::mips_mini_s_w:
1417  case Intrinsic::mips_mini_s_d:
1418    return lowerMSABinaryImmIntr(Op, DAG, MipsISD::VSMIN,
1419                                 lowerMSASplatImm(Op, 2, DAG));
1420  case Intrinsic::mips_mini_u_b:
1421  case Intrinsic::mips_mini_u_h:
1422  case Intrinsic::mips_mini_u_w:
1423  case Intrinsic::mips_mini_u_d:
1424    return lowerMSABinaryImmIntr(Op, DAG, MipsISD::VUMIN,
1425                                 lowerMSASplatImm(Op, 2, DAG));
1426  case Intrinsic::mips_mulv_b:
1427  case Intrinsic::mips_mulv_h:
1428  case Intrinsic::mips_mulv_w:
1429  case Intrinsic::mips_mulv_d:
1430    return lowerMSABinaryIntr(Op, DAG, ISD::MUL);
1431  case Intrinsic::mips_nlzc_b:
1432  case Intrinsic::mips_nlzc_h:
1433  case Intrinsic::mips_nlzc_w:
1434  case Intrinsic::mips_nlzc_d:
1435    return lowerMSAUnaryIntr(Op, DAG, ISD::CTLZ);
1436  case Intrinsic::mips_nor_v: {
1437    SDValue Res = lowerMSABinaryIntr(Op, DAG, ISD::OR);
1438    return DAG.getNOT(SDLoc(Op), Res, Res->getValueType(0));
1439  }
1440  case Intrinsic::mips_nori_b: {
1441    SDValue Res = lowerMSABinaryImmIntr(Op, DAG, ISD::OR,
1442                                        lowerMSASplatImm(Op, 2, DAG));
1443    return DAG.getNOT(SDLoc(Op), Res, Res->getValueType(0));
1444  }
1445  case Intrinsic::mips_or_v:
1446    return lowerMSABinaryIntr(Op, DAG, ISD::OR);
1447  case Intrinsic::mips_ori_b:
1448    return lowerMSABinaryImmIntr(Op, DAG, ISD::OR,
1449                                 lowerMSASplatImm(Op, 2, DAG));
1450  case Intrinsic::mips_pckev_b:
1451  case Intrinsic::mips_pckev_h:
1452  case Intrinsic::mips_pckev_w:
1453  case Intrinsic::mips_pckev_d:
1454    return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), Op->getValueType(0),
1455                       Op->getOperand(1), Op->getOperand(2));
1456  case Intrinsic::mips_pckod_b:
1457  case Intrinsic::mips_pckod_h:
1458  case Intrinsic::mips_pckod_w:
1459  case Intrinsic::mips_pckod_d:
1460    return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), Op->getValueType(0),
1461                       Op->getOperand(1), Op->getOperand(2));
1462  case Intrinsic::mips_pcnt_b:
1463  case Intrinsic::mips_pcnt_h:
1464  case Intrinsic::mips_pcnt_w:
1465  case Intrinsic::mips_pcnt_d:
1466    return lowerMSAUnaryIntr(Op, DAG, ISD::CTPOP);
1467  case Intrinsic::mips_shf_b:
1468  case Intrinsic::mips_shf_h:
1469  case Intrinsic::mips_shf_w:
1470    return DAG.getNode(MipsISD::SHF, SDLoc(Op), Op->getValueType(0),
1471                       Op->getOperand(2), Op->getOperand(1));
1472  case Intrinsic::mips_sll_b:
1473  case Intrinsic::mips_sll_h:
1474  case Intrinsic::mips_sll_w:
1475  case Intrinsic::mips_sll_d:
1476    return lowerMSABinaryIntr(Op, DAG, ISD::SHL);
1477  case Intrinsic::mips_slli_b:
1478  case Intrinsic::mips_slli_h:
1479  case Intrinsic::mips_slli_w:
1480  case Intrinsic::mips_slli_d:
1481    return lowerMSABinaryImmIntr(Op, DAG, ISD::SHL,
1482                                 lowerMSASplatImm(Op, 2, DAG));
1483  case Intrinsic::mips_sra_b:
1484  case Intrinsic::mips_sra_h:
1485  case Intrinsic::mips_sra_w:
1486  case Intrinsic::mips_sra_d:
1487    return lowerMSABinaryIntr(Op, DAG, ISD::SRA);
1488  case Intrinsic::mips_srai_b:
1489  case Intrinsic::mips_srai_h:
1490  case Intrinsic::mips_srai_w:
1491  case Intrinsic::mips_srai_d:
1492    return lowerMSABinaryImmIntr(Op, DAG, ISD::SRA,
1493                                 lowerMSASplatImm(Op, 2, DAG));
1494  case Intrinsic::mips_srl_b:
1495  case Intrinsic::mips_srl_h:
1496  case Intrinsic::mips_srl_w:
1497  case Intrinsic::mips_srl_d:
1498    return lowerMSABinaryIntr(Op, DAG, ISD::SRL);
1499  case Intrinsic::mips_srli_b:
1500  case Intrinsic::mips_srli_h:
1501  case Intrinsic::mips_srli_w:
1502  case Intrinsic::mips_srli_d:
1503    return lowerMSABinaryImmIntr(Op, DAG, ISD::SRL,
1504                                 lowerMSASplatImm(Op, 2, DAG));
1505  case Intrinsic::mips_subv_b:
1506  case Intrinsic::mips_subv_h:
1507  case Intrinsic::mips_subv_w:
1508  case Intrinsic::mips_subv_d:
1509    return lowerMSABinaryIntr(Op, DAG, ISD::SUB);
1510  case Intrinsic::mips_subvi_b:
1511  case Intrinsic::mips_subvi_h:
1512  case Intrinsic::mips_subvi_w:
1513  case Intrinsic::mips_subvi_d:
1514    return lowerMSABinaryImmIntr(Op, DAG, ISD::SUB,
1515                                 lowerMSASplatImm(Op, 2, DAG));
1516  case Intrinsic::mips_vshf_b:
1517  case Intrinsic::mips_vshf_h:
1518  case Intrinsic::mips_vshf_w:
1519  case Intrinsic::mips_vshf_d:
1520    return DAG.getNode(MipsISD::VSHF, SDLoc(Op), Op->getValueType(0),
1521                       Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
1522  case Intrinsic::mips_xor_v:
1523    return lowerMSABinaryIntr(Op, DAG, ISD::XOR);
1524  case Intrinsic::mips_xori_b:
1525    return lowerMSABinaryImmIntr(Op, DAG, ISD::XOR,
1526                                 lowerMSASplatImm(Op, 2, DAG));
1527  }
1528}
1529
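// Lowers the MSA ld.[bhwd]/ldx.[bhwd] intrinsics (dispatched from
// lowerINTRINSIC_W_CHAIN below) to an ordinary vector load: the base address
// (operand 2) and the offset (operand 3) are added, and a 16-byte aligned
// ISD::LOAD of the intrinsic's result type is emitted. Roughly, for an
// illustrative .w call this produces
//   (v4i32 (load (add %base, %offset)))
// leaving the rest of the work to the usual load selection patterns.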
1530static SDValue lowerMSALoadIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
1531  SDLoc DL(Op);
1532  SDValue ChainIn = Op->getOperand(0);
1533  SDValue Address = Op->getOperand(2);
1534  SDValue Offset  = Op->getOperand(3);
1535  EVT ResTy = Op->getValueType(0);
1536  EVT PtrTy = Address->getValueType(0);
1537
1538  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
1539
1540  return DAG.getLoad(ResTy, DL, ChainIn, Address, MachinePointerInfo(), false,
1541                     false, false, 16);
1542}
1543
1544SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
1545                                                     SelectionDAG &DAG) const {
1546  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
1547  switch (Intr) {
1548  default:
1549    return SDValue();
1550  case Intrinsic::mips_extp:
1551    return lowerDSPIntr(Op, DAG, MipsISD::EXTP);
1552  case Intrinsic::mips_extpdp:
1553    return lowerDSPIntr(Op, DAG, MipsISD::EXTPDP);
1554  case Intrinsic::mips_extr_w:
1555    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_W);
1556  case Intrinsic::mips_extr_r_w:
1557    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_R_W);
1558  case Intrinsic::mips_extr_rs_w:
1559    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_RS_W);
1560  case Intrinsic::mips_extr_s_h:
1561    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_S_H);
1562  case Intrinsic::mips_mthlip:
1563    return lowerDSPIntr(Op, DAG, MipsISD::MTHLIP);
1564  case Intrinsic::mips_mulsaq_s_w_ph:
1565    return lowerDSPIntr(Op, DAG, MipsISD::MULSAQ_S_W_PH);
1566  case Intrinsic::mips_maq_s_w_phl:
1567    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHL);
1568  case Intrinsic::mips_maq_s_w_phr:
1569    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHR);
1570  case Intrinsic::mips_maq_sa_w_phl:
1571    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHL);
1572  case Intrinsic::mips_maq_sa_w_phr:
1573    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHR);
1574  case Intrinsic::mips_dpaq_s_w_ph:
1575    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_S_W_PH);
1576  case Intrinsic::mips_dpsq_s_w_ph:
1577    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_S_W_PH);
1578  case Intrinsic::mips_dpaq_sa_l_w:
1579    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_SA_L_W);
1580  case Intrinsic::mips_dpsq_sa_l_w:
1581    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_SA_L_W);
1582  case Intrinsic::mips_dpaqx_s_w_ph:
1583    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_S_W_PH);
1584  case Intrinsic::mips_dpaqx_sa_w_ph:
1585    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_SA_W_PH);
1586  case Intrinsic::mips_dpsqx_s_w_ph:
1587    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH);
1588  case Intrinsic::mips_dpsqx_sa_w_ph:
1589    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH);
1590  case Intrinsic::mips_ld_b:
1591  case Intrinsic::mips_ld_h:
1592  case Intrinsic::mips_ld_w:
1593  case Intrinsic::mips_ld_d:
1594  case Intrinsic::mips_ldx_b:
1595  case Intrinsic::mips_ldx_h:
1596  case Intrinsic::mips_ldx_w:
1597  case Intrinsic::mips_ldx_d:
1598    return lowerMSALoadIntr(Op, DAG, Intr);
1599  }
1600}
1601
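// Lowers the MSA st.[bhwd]/stx.[bhwd] intrinsics (dispatched from
// lowerINTRINSIC_VOID below) to an ordinary vector store: the value to store
// (operand 2) is written to the sum of the base address (operand 3) and the
// offset (operand 4) as a 16-byte aligned ISD::STORE, mirroring
// lowerMSALoadIntr above.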
1602static SDValue lowerMSAStoreIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
1603  SDLoc DL(Op);
1604  SDValue ChainIn = Op->getOperand(0);
1605  SDValue Value   = Op->getOperand(2);
1606  SDValue Address = Op->getOperand(3);
1607  SDValue Offset  = Op->getOperand(4);
1608  EVT PtrTy = Address->getValueType(0);
1609
1610  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
1611
1612  return DAG.getStore(ChainIn, DL, Value, Address, MachinePointerInfo(), false,
1613                      false, 16);
1614}
1615
1616SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op,
1617                                                  SelectionDAG &DAG) const {
1618  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
1619  switch (Intr) {
1620  default:
1621    return SDValue();
1622  case Intrinsic::mips_st_b:
1623  case Intrinsic::mips_st_h:
1624  case Intrinsic::mips_st_w:
1625  case Intrinsic::mips_st_d:
1626  case Intrinsic::mips_stx_b:
1627  case Intrinsic::mips_stx_h:
1628  case Intrinsic::mips_stx_w:
1629  case Intrinsic::mips_stx_d:
1630    return lowerMSAStoreIntr(Op, DAG, Intr);
1631  }
1632}
1633
1634/// \brief Check if the given BuildVectorSDNode is a splat.
1635/// This method currently relies on DAG nodes being reused when equivalent,
1636/// so it's possible for this to return false even when isConstantSplat returns
1637/// true.
1638static bool isSplatVector(const BuildVectorSDNode *N) {
1639  unsigned nOps = N->getNumOperands();
1640  assert(nOps > 1 && "isSplatVector expects a BUILD_VECTOR with at least 2 operands");
1641
1642  SDValue Operand0 = N->getOperand(0);
1643
1644  for (unsigned i = 1; i < nOps; ++i) {
1645    if (N->getOperand(i) != Operand0)
1646      return false;
1647  }
1648
1649  return true;
1650}
1651
1652// Lower ISD::EXTRACT_VECTOR_ELT into MipsISD::VEXTRACT_SEXT_ELT.
1653//
1654// The non-value bits resulting from ISD::EXTRACT_VECTOR_ELT are undefined. We
1655// choose to sign-extend but we could have equally chosen zero-extend. The
1656// DAGCombiner will fold any sign/zero extension of the ISD::EXTRACT_VECTOR_ELT
1657// result into this node later (possibly changing it to a zero-extend in the
1658// process).
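// For example (illustrative only): extracting element 1 of a v8i16 value as
// an i32 becomes
//   (VEXTRACT_SEXT_ELT i32, $w, 1, ValueType:i16)
// where the trailing value-type operand records the width of the element that
// was actually read.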
1659SDValue MipsSETargetLowering::
1660lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
1661  SDLoc DL(Op);
1662  EVT ResTy = Op->getValueType(0);
1663  SDValue Op0 = Op->getOperand(0);
1664  SDValue Op1 = Op->getOperand(1);
1665  EVT EltTy = Op0->getValueType(0).getVectorElementType();
1666  return DAG.getNode(MipsISD::VEXTRACT_SEXT_ELT, DL, ResTy, Op0, Op1,
1667                     DAG.getValueType(EltTy));
1668}
1669
1670static bool isConstantOrUndef(const SDValue Op) {
1671  if (Op->getOpcode() == ISD::UNDEF)
1672    return true;
1673  if (isa<ConstantSDNode>(Op))
1674    return true;
1675  if (isa<ConstantFPSDNode>(Op))
1676    return true;
1677  return false;
1678}
1679
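// Note that this returns true as soon as *any* operand is a constant or
// undef, not only when all of them are. lowerBUILD_VECTOR below only uses its
// negation, i.e. to detect BUILD_VECTORs whose operands are all non-constant.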
1680static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op) {
1681  for (unsigned i = 0; i < Op->getNumOperands(); ++i)
1682    if (isConstantOrUndef(Op->getOperand(i)))
1683      return true;
1684  return false;
1685}
1686
1687// Lowers ISD::BUILD_VECTOR into appropriate SelectionDAG nodes for the
1688// backend.
1689//
1690// Lowers according to the following rules:
1691// - Constant splats are legal as-is as long as the SplatBitSize is a power of
1692//   2 less than or equal to 64 and the value fits into a signed 10-bit
1693//   immediate
1694// - Constant splats are lowered to bitconverted BUILD_VECTORs if SplatBitSize
1695//   is a power of 2 less than or equal to 64 and the value does not fit into a
1696//   signed 10-bit immediate
1697// - Non-constant splats are legal as-is.
1698// - Non-constant non-splats are lowered to sequences of INSERT_VECTOR_ELT.
1699// - All others are illegal and must be expanded.
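// For example (illustrative): a v4i32 splat of 7 fits into a signed 10-bit
// immediate and is left as-is (ldi.w can materialise it), whereas a v4i32
// splat of 1000 does not fit, so it is rebuilt below as a BUILD_VECTOR of i32
// constants (ViaVecTy == v4i32 in that case, so no bitcast is required).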
1700SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op,
1701                                                SelectionDAG &DAG) const {
1702  BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op);
1703  EVT ResTy = Op->getValueType(0);
1704  SDLoc DL(Op);
1705  APInt SplatValue, SplatUndef;
1706  unsigned SplatBitSize;
1707  bool HasAnyUndefs;
1708
1709  if (!Subtarget->hasMSA() || !ResTy.is128BitVector())
1710    return SDValue();
1711
1712  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
1713                            HasAnyUndefs, 8,
1714                            !Subtarget->isLittle()) && SplatBitSize <= 64) {
1715    // We can only cope with 8, 16, 32, or 64-bit elements
1716    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
1717        SplatBitSize != 64)
1718      return SDValue();
1719
1720    // If the value fits into a simm10 then we can use ldi.[bhwd]
1721    if (SplatValue.isSignedIntN(10))
1722      return Op;
1723
1724    EVT ViaVecTy;
1725
1726    switch (SplatBitSize) {
1727    default:
1728      return SDValue();
1729    case 8:
1730      ViaVecTy = MVT::v16i8;
1731      break;
1732    case 16:
1733      ViaVecTy = MVT::v8i16;
1734      break;
1735    case 32:
1736      ViaVecTy = MVT::v4i32;
1737      break;
1738    case 64:
1739      // There's no fill.d to fall back on for 64-bit values
1740      return SDValue();
1741    }
1742
1743    SmallVector<SDValue, 16> Ops;
1744    SDValue Constant = DAG.getConstant(SplatValue.sextOrSelf(32), MVT::i32);
1745
1746    for (unsigned i = 0; i < ViaVecTy.getVectorNumElements(); ++i)
1747      Ops.push_back(Constant);
1748
1749    SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Node), ViaVecTy,
1750                                 &Ops[0], Ops.size());
1751
1752    if (ViaVecTy != ResTy)
1753      Result = DAG.getNode(ISD::BITCAST, SDLoc(Node), ResTy, Result);
1754
1755    return Result;
1756  } else if (isSplatVector(Node))
1757    return Op;
1758  else if (!isConstantOrUndefBUILD_VECTOR(Node)) {
1759    // Use INSERT_VECTOR_ELT operations rather than expand to stores.
1760    // The resulting code is the same length as the expansion, but it doesn't
1761    // use memory operations
1762    EVT ResTy = Node->getValueType(0);
1763
1764    assert(ResTy.isVector());
1765
1766    unsigned NumElts = ResTy.getVectorNumElements();
1767    SDValue Vector = DAG.getUNDEF(ResTy);
1768    for (unsigned i = 0; i < NumElts; ++i) {
1769      Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
1770                           Node->getOperand(i),
1771                           DAG.getConstant(i, MVT::i32));
1772    }
1773    return Vector;
1774  }
1775
1776  return SDValue();
1777}
1778
1779// Lower VECTOR_SHUFFLE into SHF (if possible).
1780//
1781// SHF splits the vector into blocks of four elements, then shuffles these
1782// elements according to a <4 x i2> constant (encoded as an integer immediate).
1783//
1784// It is therefore possible to lower into SHF when the mask takes the form:
1785//   <a, b, c, d, a+4, b+4, c+4, d+4, a+8, b+8, c+8, d+8, ...>
1786// When undefs appear they are treated as if they were whatever value is
1787// necessary in order to fit the above form.
1788//
1789// For example:
1790//   %2 = shufflevector <8 x i16> %0, <8 x i16> undef,
1791//                      <8 x i32> <i32 3, i32 2, i32 1, i32 0,
1792//                                 i32 7, i32 6, i32 5, i32 4>
1793// is lowered to:
1794//   (SHF_H $w0, $w1, 27)
1795// where the 27 comes from:
1796//   3 + (2 << 2) + (1 << 4) + (0 << 6)
1797static SDValue lowerVECTOR_SHUFFLE_SHF(SDValue Op, EVT ResTy,
1798                                       SmallVector<int, 16> Indices,
1799                                       SelectionDAG &DAG) {
1800  int SHFIndices[4] = { -1, -1, -1, -1 };
1801
1802  if (Indices.size() < 4)
1803    return SDValue();
1804
1805  for (unsigned i = 0; i < 4; ++i) {
1806    for (unsigned j = i; j < Indices.size(); j += 4) {
1807      int Idx = Indices[j];
1808
1809      // Convert from vector index to 4-element subvector index
1810      // If an index refers to an element outside of the subvector then give up
1811      if (Idx != -1) {
1812        Idx -= 4 * (j / 4);
1813        if (Idx < 0 || Idx >= 4)
1814          return SDValue();
1815      }
1816
1817      // If the mask has an undef, replace it with the current index.
1818      // Note that it might still be undef if the current index is also undef
1819      if (SHFIndices[i] == -1)
1820        SHFIndices[i] = Idx;
1821
1822      // Check that non-undef values are the same as in the mask. If they
1823      // aren't then give up
1824      if (!(Idx == -1 || Idx == SHFIndices[i]))
1825        return SDValue();
1826    }
1827  }
1828
1829  // Calculate the immediate. Replace any remaining undefs with zero
1830  APInt Imm(32, 0);
1831  for (int i = 3; i >= 0; --i) {
1832    int Idx = SHFIndices[i];
1833
1834    if (Idx == -1)
1835      Idx = 0;
1836
1837    Imm <<= 2;
1838    Imm |= Idx & 0x3;
1839  }
1840
1841  return DAG.getNode(MipsISD::SHF, SDLoc(Op), ResTy,
1842                     DAG.getConstant(Imm, MVT::i32), Op->getOperand(0));
1843}
1844
1845// Lower VECTOR_SHUFFLE into ILVEV (if possible).
1846//
1847// ILVEV interleaves the even elements from each vector.
1848//
1849// It is possible to lower into ILVEV when the mask takes the form:
1850//   <0, n, 2, n+2, 4, n+4, ...>
1851// where n is the number of elements in the vector.
1852//
1853// When undefs appear in the mask they are treated as if they were whatever
1854// value is necessary in order to fit the above form.
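//
// For example, a v8i16 shuffle (n == 8) can use ILVEV when its mask is
//   <0, 8, 2, 10, 4, 12, 6, 14>
// (any of the positions may also be undef).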
1855static SDValue lowerVECTOR_SHUFFLE_ILVEV(SDValue Op, EVT ResTy,
1856                                         SmallVector<int, 16> Indices,
1857                                         SelectionDAG &DAG) {
1858  assert((Indices.size() % 2) == 0);
1859  int WsIdx = 0;
1860  int WtIdx = ResTy.getVectorNumElements();
1861
1862  for (unsigned i = 0; i < Indices.size(); i += 2) {
1863    if (Indices[i] != -1 && Indices[i] != WsIdx)
1864      return SDValue();
1865    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1866      return SDValue();
1867    WsIdx += 2;
1868    WtIdx += 2;
1869  }
1870
1871  return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), ResTy, Op->getOperand(0),
1872                     Op->getOperand(1));
1873}
1874
1875// Lower VECTOR_SHUFFLE into ILVOD (if possible).
1876//
1877// ILVOD interleaves the odd elements from each vector.
1878//
1879// It is possible to lower into ILVOD when the mask takes the form:
1880//   <1, n+1, 3, n+3, 5, n+5, ...>
1881// where n is the number of elements in the vector.
1882//
1883// When undefs appear in the mask they are treated as if they were whatever
1884// value is necessary in order to fit the above form.
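//
// For example, the v8i16 (n == 8) form of this mask is
//   <1, 9, 3, 11, 5, 13, 7, 15>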
1885static SDValue lowerVECTOR_SHUFFLE_ILVOD(SDValue Op, EVT ResTy,
1886                                         SmallVector<int, 16> Indices,
1887                                         SelectionDAG &DAG) {
1888  assert((Indices.size() % 2) == 0);
1889  int WsIdx = 1;
1890  int WtIdx = ResTy.getVectorNumElements() + 1;
1891
1892  for (unsigned i = 0; i < Indices.size(); i += 2) {
1893    if (Indices[i] != -1 && Indices[i] != WsIdx)
1894      return SDValue();
1895    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1896      return SDValue();
1897    WsIdx += 2;
1898    WtIdx += 2;
1899  }
1900
1901  return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), ResTy, Op->getOperand(0),
1902                     Op->getOperand(1));
1903}
1904
1905// Lower VECTOR_SHUFFLE into ILVL (if possible).
1906//
1907// ILVL interleaves consecutive elements from the left half of each vector.
1908//
1909// It is possible to lower into ILVL when the mask takes the form:
1910//   <0, n, 1, n+1, 2, n+2, ...>
1911// where n is the number of elements in the vector.
1912//
1913// When undefs appear in the mask they are treated as if they were whatever
1914// value is necessary in order to fit the above form.
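//
// For example, the v8i16 (n == 8) form of this mask is
//   <0, 8, 1, 9, 2, 10, 3, 11>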
1915static SDValue lowerVECTOR_SHUFFLE_ILVL(SDValue Op, EVT ResTy,
1916                                        SmallVector<int, 16> Indices,
1917                                        SelectionDAG &DAG) {
1918  assert((Indices.size() % 2) == 0);
1919  int WsIdx = 0;
1920  int WtIdx = ResTy.getVectorNumElements();
1921
1922  for (unsigned i = 0; i < Indices.size(); i += 2) {
1923    if (Indices[i] != -1 && Indices[i] != WsIdx)
1924      return SDValue();
1925    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1926      return SDValue();
1927    WsIdx++;
1928    WtIdx++;
1929  }
1930
1931  return DAG.getNode(MipsISD::ILVL, SDLoc(Op), ResTy, Op->getOperand(0),
1932                     Op->getOperand(1));
1933}
1934
1935// Lower VECTOR_SHUFFLE into ILVR (if possible).
1936//
1937// ILVR interleaves consecutive elements from the right half of each vector.
1938//
1939// It is possible to lower into ILVR when the mask takes the form:
1940//   <x, n+x, x+1, n+x+1, x+2, n+x+2, ...>
1941// where n is the number of elements in the vector and x is half n.
1942//
1943// When undefs appear in the mask they are treated as if they were whatever
1944// value is necessary in order to fit the above form.
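//
// For example, with v8i16 (n == 8, x == 4) the mask is
//   <4, 12, 5, 13, 6, 14, 7, 15>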
1945static SDValue lowerVECTOR_SHUFFLE_ILVR(SDValue Op, EVT ResTy,
1946                                        SmallVector<int, 16> Indices,
1947                                        SelectionDAG &DAG) {
1948  assert((Indices.size() % 2) == 0);
1949  unsigned NumElts = ResTy.getVectorNumElements();
1950  int WsIdx = NumElts / 2;
1951  int WtIdx = NumElts + NumElts / 2;
1952
1953  for (unsigned i = 0; i < Indices.size(); i += 2) {
1954    if (Indices[i] != -1 && Indices[i] != WsIdx)
1955      return SDValue();
1956    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
1957      return SDValue();
1958    WsIdx++;
1959    WtIdx++;
1960  }
1961
1962  return DAG.getNode(MipsISD::ILVR, SDLoc(Op), ResTy, Op->getOperand(0),
1963                     Op->getOperand(1));
1964}
1965
1966// Lower VECTOR_SHUFFLE into PCKEV (if possible).
1967//
1968// PCKEV copies the even elements of each vector into the result vector.
1969//
1970// It is possible to lower into PCKEV when the mask takes the form:
1971//   <0, 2, 4, ..., n, n+2, n+4, ...>
1972// where n is the number of elements in the vector.
1973//
1974// When undef's appear in the mask they are treated as if they were whatever
1975// value is necessary in order to fit the above form.
1976static SDValue lowerVECTOR_SHUFFLE_PCKEV(SDValue Op, EVT ResTy,
1977                                         SmallVector<int, 16> Indices,
1978                                         SelectionDAG &DAG) {
1979  assert((Indices.size() % 2) == 0);
1980  int Idx = 0;
1981
1982  for (unsigned i = 0; i < Indices.size(); ++i) {
1983    if (Indices[i] != -1 && Indices[i] != Idx)
1984      return SDValue();
1985    Idx += 2;
1986  }
1987
1988  return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), ResTy, Op->getOperand(0),
1989                     Op->getOperand(1));
1990}
1991
1992// Lower VECTOR_SHUFFLE into PCKOD (if possible).
1993//
1994// PCKOD copies the odd elements of each vector into the result vector.
1995//
1996// It is possible to lower into PCKOD when the mask takes the form:
1997//   <1, 3, 5, ..., n+1, n+3, n+5, ...>
1998// where n is the number of elements in the vector.
1999//
2000// When undefs appear in the mask they are treated as if they were whatever
2001// value is necessary in order to fit the above form.
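//
// For example, the v8i16 (n == 8) form of this mask is
//   <1, 3, 5, 7, 9, 11, 13, 15>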
2002static SDValue lowerVECTOR_SHUFFLE_PCKOD(SDValue Op, EVT ResTy,
2003                                         SmallVector<int, 16> Indices,
2004                                         SelectionDAG &DAG) {
2005  assert((Indices.size() % 2) == 0);
2006  int Idx = 1;
2007
2008  for (unsigned i = 0; i < Indices.size(); ++i) {
2009    if (Indices[i] != -1 && Indices[i] != Idx)
2010      return SDValue();
2011    Idx += 2;
2012  }
2013
2014  return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), ResTy, Op->getOperand(0),
2015                     Op->getOperand(1));
2016}
2017
2018// Lower VECTOR_SHUFFLE into VSHF.
2019//
2020// This mostly consists of converting the shuffle indices in Indices into a
2021// BUILD_VECTOR and adding it as an operand to the resulting VSHF. There is
2022// also code to eliminate unused operands of the VECTOR_SHUFFLE. For example,
2023// if the type is v8i16 and all the indices are less than 8 then the second
2024// operand is unused and can be replaced with anything. We choose to replace it
2025// with the used operand since this reduces the number of instructions overall.
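//
// For example (illustrative): a v8i16 shuffle whose mask only uses indices
// 0..7 reads nothing from the second vector operand, so the node is emitted
// as (VSHF mask, $w0, $w0), i.e. with the first operand passed twice.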
2026static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
2027                                        SmallVector<int, 16> Indices,
2028                                        SelectionDAG &DAG) {
2029  SmallVector<SDValue, 16> Ops;
2030  SDValue Op0;
2031  SDValue Op1;
2032  EVT MaskVecTy = ResTy.changeVectorElementTypeToInteger();
2033  EVT MaskEltTy = MaskVecTy.getVectorElementType();
2034  bool Using1stVec = false;
2035  bool Using2ndVec = false;
2036  SDLoc DL(Op);
2037  int ResTyNumElts = ResTy.getVectorNumElements();
2038
2039  for (int i = 0; i < ResTyNumElts; ++i) {
2040    // Idx == -1 means UNDEF
2041    int Idx = Indices[i];
2042
2043    if (0 <= Idx && Idx < ResTyNumElts)
2044      Using1stVec = true;
2045    if (ResTyNumElts <= Idx && Idx < ResTyNumElts * 2)
2046      Using2ndVec = true;
2047  }
2048
2049  for (SmallVector<int, 16>::iterator I = Indices.begin(); I != Indices.end();
2050       ++I)
2051    Ops.push_back(DAG.getTargetConstant(*I, MaskEltTy));
2052
2053  SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, &Ops[0],
2054                                Ops.size());
2055
2056  if (Using1stVec && Using2ndVec) {
2057    Op0 = Op->getOperand(0);
2058    Op1 = Op->getOperand(1);
2059  } else if (Using1stVec)
2060    Op0 = Op1 = Op->getOperand(0);
2061  else if (Using2ndVec)
2062    Op0 = Op1 = Op->getOperand(1);
2063  else
2064    llvm_unreachable("shuffle vector mask references neither vector operand?");
2065
2066  return DAG.getNode(MipsISD::VSHF, DL, ResTy, MaskVec, Op0, Op1);
2067}
2068
2069// Lower VECTOR_SHUFFLE into one of a number of instructions depending on the
2070// indices in the shuffle.
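//
// The pattern-specific forms (SHF, ILVEV/ILVOD/ILVL/ILVR, PCKEV/PCKOD) are
// tried first; if none of them match, the shuffle is emitted as a VSHF, which
// takes the mask itself as a vector operand and so covers the remaining cases.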
2071SDValue MipsSETargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
2072                                                  SelectionDAG &DAG) const {
2073  ShuffleVectorSDNode *Node = cast<ShuffleVectorSDNode>(Op);
2074  EVT ResTy = Op->getValueType(0);
2075
2076  if (!ResTy.is128BitVector())
2077    return SDValue();
2078
2079  int ResTyNumElts = ResTy.getVectorNumElements();
2080  SmallVector<int, 16> Indices;
2081
2082  for (int i = 0; i < ResTyNumElts; ++i)
2083    Indices.push_back(Node->getMaskElt(i));
2084
2085  SDValue Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG);
2086  if (Result.getNode())
2087    return Result;
2088  Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG);
2089  if (Result.getNode())
2090    return Result;
2091  Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG);
2092  if (Result.getNode())
2093    return Result;
2094  Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG);
2095  if (Result.getNode())
2096    return Result;
2097  Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG);
2098  if (Result.getNode())
2099    return Result;
2100  Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG);
2101  if (Result.getNode())
2102    return Result;
2103  Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG);
2104  if (Result.getNode())
2105    return Result;
2106  return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
2107}
2108
2109MachineBasicBlock *MipsSETargetLowering::
2110emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const {
2111  // $bb:
2112  //  bposge32_pseudo $vr0
2113  //  =>
2114  // $bb:
2115  //  bposge32 $tbb
2116  // $fbb:
2117  //  li $vr2, 0
2118  //  b $sink
2119  // $tbb:
2120  //  li $vr1, 1
2121  // $sink:
2122  //  $vr0 = phi($vr2, $fbb, $vr1, $tbb)
2123
2124  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2125  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2126  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
2127  DebugLoc DL = MI->getDebugLoc();
2128  const BasicBlock *LLVM_BB = BB->getBasicBlock();
2129  MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
2130  MachineFunction *F = BB->getParent();
2131  MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
2132  MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
2133  MachineBasicBlock *Sink  = F->CreateMachineBasicBlock(LLVM_BB);
2134  F->insert(It, FBB);
2135  F->insert(It, TBB);
2136  F->insert(It, Sink);
2137
2138  // Transfer the remainder of BB and its successor edges to Sink.
2139  Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
2140               BB->end());
2141  Sink->transferSuccessorsAndUpdatePHIs(BB);
2142
2143  // Add successors.
2144  BB->addSuccessor(FBB);
2145  BB->addSuccessor(TBB);
2146  FBB->addSuccessor(Sink);
2147  TBB->addSuccessor(Sink);
2148
2149  // Insert the real bposge32 instruction to $BB.
2150  BuildMI(BB, DL, TII->get(Mips::BPOSGE32)).addMBB(TBB);
2151
2152  // Fill $FBB.
2153  unsigned VR2 = RegInfo.createVirtualRegister(RC);
2154  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), VR2)
2155    .addReg(Mips::ZERO).addImm(0);
2156  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
2157
2158  // Fill $TBB.
2159  unsigned VR1 = RegInfo.createVirtualRegister(RC);
2160  BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), VR1)
2161    .addReg(Mips::ZERO).addImm(1);
2162
2163  // Insert phi function to $Sink.
2164  BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
2165          MI->getOperand(0).getReg())
2166    .addReg(VR2).addMBB(FBB).addReg(VR1).addMBB(TBB);
2167
2168  MI->eraseFromParent();   // The pseudo instruction is gone now.
2169  return Sink;
2170}
2171
2172MachineBasicBlock *MipsSETargetLowering::
2173emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
2174                     unsigned BranchOp) const {
2175  // $bb:
2176  //  vany_nonzero $rd, $ws
2177  //  =>
2178  // $bb:
2179  //  bnz.b $ws, $tbb
2180  //  b $fbb
2181  // $fbb:
2182  //  li $rd1, 0
2183  //  b $sink
2184  // $tbb:
2185  //  li $rd2, 1
2186  // $sink:
2187  //  $rd = phi($rd1, $fbb, $rd2, $tbb)
2188
2189  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2190  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2191  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
2192  DebugLoc DL = MI->getDebugLoc();
2193  const BasicBlock *LLVM_BB = BB->getBasicBlock();
2194  MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
2195  MachineFunction *F = BB->getParent();
2196  MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
2197  MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
2198  MachineBasicBlock *Sink  = F->CreateMachineBasicBlock(LLVM_BB);
2199  F->insert(It, FBB);
2200  F->insert(It, TBB);
2201  F->insert(It, Sink);
2202
2203  // Transfer the remainder of BB and its successor edges to Sink.
2204  Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
2205               BB->end());
2206  Sink->transferSuccessorsAndUpdatePHIs(BB);
2207
2208  // Add successors.
2209  BB->addSuccessor(FBB);
2210  BB->addSuccessor(TBB);
2211  FBB->addSuccessor(Sink);
2212  TBB->addSuccessor(Sink);
2213
2214  // Insert the real bnz.b instruction to $BB.
2215  BuildMI(BB, DL, TII->get(BranchOp))
2216    .addReg(MI->getOperand(1).getReg())
2217    .addMBB(TBB);
2218
2219  // Fill $FBB.
2220  unsigned RD1 = RegInfo.createVirtualRegister(RC);
2221  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), RD1)
2222    .addReg(Mips::ZERO).addImm(0);
2223  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
2224
2225  // Fill $TBB.
2226  unsigned RD2 = RegInfo.createVirtualRegister(RC);
2227  BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), RD2)
2228    .addReg(Mips::ZERO).addImm(1);
2229
2230  // Insert phi function to $Sink.
2231  BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
2232          MI->getOperand(0).getReg())
2233    .addReg(RD1).addMBB(FBB).addReg(RD2).addMBB(TBB);
2234
2235  MI->eraseFromParent();   // The pseudo instruction is gone now.
2236  return Sink;
2237}
2238