MipsSEISelLowering.cpp revision ea28aafa83fc2b6dd632041278c9a18e5a2b2b41
1//===-- MipsSEISelLowering.cpp - MipsSE DAG Lowering Interface --*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// Subclass of MipsTargetLowering specialized for mips32/64.
11//
12//===----------------------------------------------------------------------===//
13#define DEBUG_TYPE "mips-isel"
14#include "MipsSEISelLowering.h"
15#include "MipsRegisterInfo.h"
16#include "MipsTargetMachine.h"
17#include "llvm/CodeGen/MachineInstrBuilder.h"
18#include "llvm/CodeGen/MachineRegisterInfo.h"
19#include "llvm/IR/Intrinsics.h"
20#include "llvm/Support/CommandLine.h"
21#include "llvm/Support/Debug.h"
22#include "llvm/Support/raw_ostream.h"
23#include "llvm/Target/TargetInstrInfo.h"
24
25using namespace llvm;
26
27static cl::opt<bool>
28EnableMipsTailCalls("enable-mips-tail-calls", cl::Hidden,
29                    cl::desc("MIPS: Enable tail calls."), cl::init(false));
30
31static cl::opt<bool> NoDPLoadStore("mno-ldc1-sdc1", cl::init(false),
32                                   cl::desc("Expand double precision loads and "
33                                            "stores to their single precision "
34                                            "counterparts"));
35
36MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
37  : MipsTargetLowering(TM) {
38  // Set up the register classes
39  addRegisterClass(MVT::i32, &Mips::GPR32RegClass);
40
41  if (HasMips64)
42    addRegisterClass(MVT::i64, &Mips::GPR64RegClass);
43
44  if (Subtarget->hasDSP() || Subtarget->hasMSA()) {
45    // Expand all truncating stores and extending loads.
46    unsigned FirstVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
47    unsigned LastVT = (unsigned)MVT::LAST_VECTOR_VALUETYPE;
48
49    for (unsigned VT0 = FirstVT; VT0 <= LastVT; ++VT0) {
50      for (unsigned VT1 = FirstVT; VT1 <= LastVT; ++VT1)
51        setTruncStoreAction((MVT::SimpleValueType)VT0,
52                            (MVT::SimpleValueType)VT1, Expand);
53
54      setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
55      setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
56      setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT0, Expand);
57    }
58  }
59
60  if (Subtarget->hasDSP()) {
61    MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};
62
63    for (unsigned i = 0; i < array_lengthof(VecTys); ++i) {
64      addRegisterClass(VecTys[i], &Mips::DSPRRegClass);
65
66      // Expand all builtin opcodes.
67      for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
68        setOperationAction(Opc, VecTys[i], Expand);
69
70      setOperationAction(ISD::ADD, VecTys[i], Legal);
71      setOperationAction(ISD::SUB, VecTys[i], Legal);
72      setOperationAction(ISD::LOAD, VecTys[i], Legal);
73      setOperationAction(ISD::STORE, VecTys[i], Legal);
74      setOperationAction(ISD::BITCAST, VecTys[i], Legal);
75    }
76
77    setTargetDAGCombine(ISD::SHL);
78    setTargetDAGCombine(ISD::SRA);
79    setTargetDAGCombine(ISD::SRL);
80    setTargetDAGCombine(ISD::SETCC);
81    setTargetDAGCombine(ISD::VSELECT);
82  }
83
84  if (Subtarget->hasDSPR2())
85    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
86
87  if (Subtarget->hasMSA()) {
88    addMSAIntType(MVT::v16i8, &Mips::MSA128BRegClass);
89    addMSAIntType(MVT::v8i16, &Mips::MSA128HRegClass);
90    addMSAIntType(MVT::v4i32, &Mips::MSA128WRegClass);
91    addMSAIntType(MVT::v2i64, &Mips::MSA128DRegClass);
92    addMSAFloatType(MVT::v8f16, &Mips::MSA128HRegClass);
93    addMSAFloatType(MVT::v4f32, &Mips::MSA128WRegClass);
94    addMSAFloatType(MVT::v2f64, &Mips::MSA128DRegClass);
95
96    setTargetDAGCombine(ISD::AND);
97    setTargetDAGCombine(ISD::OR);
98    setTargetDAGCombine(ISD::SRA);
99    setTargetDAGCombine(ISD::VSELECT);
100    setTargetDAGCombine(ISD::XOR);
101  }
102
103  if (!Subtarget->mipsSEUsesSoftFloat()) {
104    addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
105
106    // When dealing with single precision only, use libcalls for f64 operations.
107    if (!Subtarget->isSingleFloat()) {
108      if (Subtarget->isFP64bit())
109        addRegisterClass(MVT::f64, &Mips::FGR64RegClass);
110      else
111        addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);
112    }
113  }
114
115  setOperationAction(ISD::SMUL_LOHI,          MVT::i32, Custom);
116  setOperationAction(ISD::UMUL_LOHI,          MVT::i32, Custom);
117  setOperationAction(ISD::MULHS,              MVT::i32, Custom);
118  setOperationAction(ISD::MULHU,              MVT::i32, Custom);
119
120  if (HasMips64) {
121    setOperationAction(ISD::MULHS,            MVT::i64, Custom);
122    setOperationAction(ISD::MULHU,            MVT::i64, Custom);
123    setOperationAction(ISD::MUL,              MVT::i64, Custom);
124  }
125
126  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
127  setOperationAction(ISD::INTRINSIC_W_CHAIN,  MVT::i64, Custom);
128
129  setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
130  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
131  setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
132  setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
133  setOperationAction(ISD::ATOMIC_FENCE,       MVT::Other, Custom);
134  setOperationAction(ISD::LOAD,               MVT::i32, Custom);
135  setOperationAction(ISD::STORE,              MVT::i32, Custom);
136
137  setTargetDAGCombine(ISD::ADDE);
138  setTargetDAGCombine(ISD::SUBE);
139  setTargetDAGCombine(ISD::MUL);
140
141  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
142  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
143  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
144
145  if (NoDPLoadStore) {
146    setOperationAction(ISD::LOAD, MVT::f64, Custom);
147    setOperationAction(ISD::STORE, MVT::f64, Custom);
148  }
149
150  computeRegisterProperties();
151}
152
153const MipsTargetLowering *
154llvm::createMipsSETargetLowering(MipsTargetMachine &TM) {
155  return new MipsSETargetLowering(TM);
156}
157
158// Enable MSA support for the given integer type and Register class.
159void MipsSETargetLowering::
160addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
161  addRegisterClass(Ty, RC);
162
163  // Expand all builtin opcodes.
164  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
165    setOperationAction(Opc, Ty, Expand);
166
167  setOperationAction(ISD::BITCAST, Ty, Legal);
168  setOperationAction(ISD::LOAD, Ty, Legal);
169  setOperationAction(ISD::STORE, Ty, Legal);
170  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Custom);
171  setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
172  setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);
173
174  setOperationAction(ISD::ADD, Ty, Legal);
175  setOperationAction(ISD::AND, Ty, Legal);
176  setOperationAction(ISD::CTLZ, Ty, Legal);
177  setOperationAction(ISD::CTPOP, Ty, Legal);
178  setOperationAction(ISD::MUL, Ty, Legal);
179  setOperationAction(ISD::OR, Ty, Legal);
180  setOperationAction(ISD::SDIV, Ty, Legal);
181  setOperationAction(ISD::SREM, Ty, Legal);
182  setOperationAction(ISD::SHL, Ty, Legal);
183  setOperationAction(ISD::SRA, Ty, Legal);
184  setOperationAction(ISD::SRL, Ty, Legal);
185  setOperationAction(ISD::SUB, Ty, Legal);
186  setOperationAction(ISD::UDIV, Ty, Legal);
187  setOperationAction(ISD::UREM, Ty, Legal);
188  setOperationAction(ISD::VECTOR_SHUFFLE, Ty, Custom);
189  setOperationAction(ISD::VSELECT, Ty, Legal);
190  setOperationAction(ISD::XOR, Ty, Legal);
191
192  if (Ty == MVT::v4i32 || Ty == MVT::v2i64) {
193    setOperationAction(ISD::FP_TO_SINT, Ty, Legal);
194    setOperationAction(ISD::FP_TO_UINT, Ty, Legal);
195    setOperationAction(ISD::SINT_TO_FP, Ty, Legal);
196    setOperationAction(ISD::UINT_TO_FP, Ty, Legal);
197  }
198
199  setOperationAction(ISD::SETCC, Ty, Legal);
200  setCondCodeAction(ISD::SETNE, Ty, Expand);
201  setCondCodeAction(ISD::SETGE, Ty, Expand);
202  setCondCodeAction(ISD::SETGT, Ty, Expand);
203  setCondCodeAction(ISD::SETUGE, Ty, Expand);
204  setCondCodeAction(ISD::SETUGT, Ty, Expand);
205}
206
207// Enable MSA support for the given floating-point type and Register class.
208void MipsSETargetLowering::
209addMSAFloatType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
210  addRegisterClass(Ty, RC);
211
212  // Expand all builtin opcodes.
213  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
214    setOperationAction(Opc, Ty, Expand);
215
216  setOperationAction(ISD::LOAD, Ty, Legal);
217  setOperationAction(ISD::STORE, Ty, Legal);
218  setOperationAction(ISD::BITCAST, Ty, Legal);
219  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Legal);
220  setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
221  setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);
222
223  if (Ty != MVT::v8f16) {
224    setOperationAction(ISD::FABS,  Ty, Legal);
225    setOperationAction(ISD::FADD,  Ty, Legal);
226    setOperationAction(ISD::FDIV,  Ty, Legal);
227    setOperationAction(ISD::FEXP2, Ty, Legal);
228    setOperationAction(ISD::FLOG2, Ty, Legal);
229    setOperationAction(ISD::FMA,   Ty, Legal);
230    setOperationAction(ISD::FMUL,  Ty, Legal);
231    setOperationAction(ISD::FRINT, Ty, Legal);
232    setOperationAction(ISD::FSQRT, Ty, Legal);
233    setOperationAction(ISD::FSUB,  Ty, Legal);
234    setOperationAction(ISD::VSELECT, Ty, Legal);
235
236    setOperationAction(ISD::SETCC, Ty, Legal);
237    setCondCodeAction(ISD::SETOGE, Ty, Expand);
238    setCondCodeAction(ISD::SETOGT, Ty, Expand);
239    setCondCodeAction(ISD::SETUGE, Ty, Expand);
240    setCondCodeAction(ISD::SETUGT, Ty, Expand);
241    setCondCodeAction(ISD::SETGE,  Ty, Expand);
242    setCondCodeAction(ISD::SETGT,  Ty, Expand);
243  }
244}
245
246bool
247MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
248  MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
249
250  switch (SVT) {
251  case MVT::i64:
252  case MVT::i32:
253    if (Fast)
254      *Fast = true;
255    return true;
256  default:
257    return false;
258  }
259}
260
261SDValue MipsSETargetLowering::LowerOperation(SDValue Op,
262                                             SelectionDAG &DAG) const {
263  switch(Op.getOpcode()) {
264  case ISD::LOAD:  return lowerLOAD(Op, DAG);
265  case ISD::STORE: return lowerSTORE(Op, DAG);
266  case ISD::SMUL_LOHI: return lowerMulDiv(Op, MipsISD::Mult, true, true, DAG);
267  case ISD::UMUL_LOHI: return lowerMulDiv(Op, MipsISD::Multu, true, true, DAG);
268  case ISD::MULHS:     return lowerMulDiv(Op, MipsISD::Mult, false, true, DAG);
269  case ISD::MULHU:     return lowerMulDiv(Op, MipsISD::Multu, false, true, DAG);
270  case ISD::MUL:       return lowerMulDiv(Op, MipsISD::Mult, true, false, DAG);
271  case ISD::SDIVREM:   return lowerMulDiv(Op, MipsISD::DivRem, true, true, DAG);
272  case ISD::UDIVREM:   return lowerMulDiv(Op, MipsISD::DivRemU, true, true,
273                                          DAG);
274  case ISD::INTRINSIC_WO_CHAIN: return lowerINTRINSIC_WO_CHAIN(Op, DAG);
275  case ISD::INTRINSIC_W_CHAIN:  return lowerINTRINSIC_W_CHAIN(Op, DAG);
276  case ISD::INTRINSIC_VOID:     return lowerINTRINSIC_VOID(Op, DAG);
277  case ISD::EXTRACT_VECTOR_ELT: return lowerEXTRACT_VECTOR_ELT(Op, DAG);
278  case ISD::BUILD_VECTOR:       return lowerBUILD_VECTOR(Op, DAG);
279  case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, DAG);
280  }
281
282  return MipsTargetLowering::LowerOperation(Op, DAG);
283}
284
285// selectMADD -
286// Transforms a subgraph in CurDAG if the following pattern is found:
287//  (addc multLo, Lo0), (adde multHi, Hi0),
288// where,
289//  multHi/Lo: product of multiplication
290//  Lo0: initial value of Lo register
291//  Hi0: initial value of Hi register
292// Return true if pattern matching was successful.
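//
// For illustration (using made-up value names $a, $b, $lo, $hi):
//   (addc (smul_lohi $a, $b):0, $lo), (adde (smul_lohi $a, $b):1, $hi)
// is rewritten so that both results are read back from a single
//   (MipsMAdd $a, $b, (MTLOHI $lo, $hi))
// node via MFLO/MFHI.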
293static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) {
294  // ADDENode's carry-in operand must be a flag output of an ADDC node in order
295  // for the matching to be successful.
296  SDNode *ADDCNode = ADDENode->getOperand(2).getNode();
297
298  if (ADDCNode->getOpcode() != ISD::ADDC)
299    return false;
300
301  SDValue MultHi = ADDENode->getOperand(0);
302  SDValue MultLo = ADDCNode->getOperand(0);
303  SDNode *MultNode = MultHi.getNode();
304  unsigned MultOpc = MultHi.getOpcode();
305
306  // MultHi and MultLo must be generated by the same node,
307  if (MultLo.getNode() != MultNode)
308    return false;
309
310  // and it must be a multiplication.
311  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
312    return false;
313
314  // MultLo and MultHi must be the first and second outputs of MultNode
315  // respectively.
316  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
317    return false;
318
319  // Transform this to a MADD only if ADDENode and ADDCNode are the only users
320  // of the values of MultNode, in which case MultNode will be removed in later
321  // phases.
322  // If there exist users other than ADDENode or ADDCNode, this function returns
323  // here, which will result in MultNode being mapped to a single MULT
324  // instruction node rather than a pair of MULT and MADD instructions being
325  // produced.
326  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
327    return false;
328
329  SDLoc DL(ADDENode);
330
331  // Initialize accumulator.
332  SDValue ACCIn = CurDAG->getNode(MipsISD::MTLOHI, DL, MVT::Untyped,
333                                  ADDCNode->getOperand(1),
334                                  ADDENode->getOperand(1));
335
336  // create MipsMAdd(u) node
337  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MAddu : MipsISD::MAdd;
338
339  SDValue MAdd = CurDAG->getNode(MultOpc, DL, MVT::Untyped,
340                                 MultNode->getOperand(0),// Factor 0
341                                 MultNode->getOperand(1),// Factor 1
342                                 ACCIn);
343
344  // replace uses of adde and addc here
345  if (!SDValue(ADDCNode, 0).use_empty()) {
346    SDValue LoOut = CurDAG->getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
347    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), LoOut);
348  }
349  if (!SDValue(ADDENode, 0).use_empty()) {
350    SDValue HiOut = CurDAG->getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
351    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), HiOut);
352  }
353
354  return true;
355}
356
357// selectMSUB -
358// Transforms a subgraph in CurDAG if the following pattern is found:
359//  (subc Lo0, multLo), (sube Hi0, multHi),
360// where,
361//  multHi/Lo: product of multiplication
362//  Lo0: initial value of Lo register
363//  Hi0: initial value of Hi register
364// Return true if pattern matching was successful.
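//
// For illustration (using made-up value names $a, $b, $lo, $hi):
//   (subc $lo, (smul_lohi $a, $b):0), (sube $hi, (smul_lohi $a, $b):1)
// is rewritten so that both results are read back from a single
//   (MipsMSub $a, $b, (MTLOHI $lo, $hi))
// node via MFLO/MFHI.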
365static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) {
366  // SUBENode's carry-in operand must be a flag output of a SUBC node in order
367  // for the matching to be successful.
368  SDNode *SUBCNode = SUBENode->getOperand(2).getNode();
369
370  if (SUBCNode->getOpcode() != ISD::SUBC)
371    return false;
372
373  SDValue MultHi = SUBENode->getOperand(1);
374  SDValue MultLo = SUBCNode->getOperand(1);
375  SDNode *MultNode = MultHi.getNode();
376  unsigned MultOpc = MultHi.getOpcode();
377
378  // MultHi and MultLo must be generated by the same node,
379  if (MultLo.getNode() != MultNode)
380    return false;
381
382  // and it must be a multiplication.
383  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
384    return false;
385
386  // MultLo and MultHi must be the first and second outputs of MultNode
387  // respectively.
388  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
389    return false;
390
391  // Transform this to a MSUB only if SUBENode and SUBCNode are the only users
392  // of the values of MultNode, in which case MultNode will be removed in later
393  // phases.
394  // If there exist users other than SUBENode or SUBCNode, this function returns
395  // here, which will result in MultNode being mapped to a single MULT
396  // instruction node rather than a pair of MULT and MSUB instructions being
397  // produced.
398  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
399    return false;
400
401  SDLoc DL(SUBENode);
402
403  // Initialize accumulator.
404  SDValue ACCIn = CurDAG->getNode(MipsISD::MTLOHI, DL, MVT::Untyped,
405                                  SUBCNode->getOperand(0),
406                                  SUBENode->getOperand(0));
407
408  // create MipsMSub(u) node
409  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MSubu : MipsISD::MSub;
410
411  SDValue MSub = CurDAG->getNode(MultOpc, DL, MVT::Glue,
412                                 MultNode->getOperand(0),// Factor 0
413                                 MultNode->getOperand(1),// Factor 1
414                                 ACCIn);
415
416  // replace uses of sube and subc here
417  if (!SDValue(SUBCNode, 0).use_empty()) {
418    SDValue LoOut = CurDAG->getNode(MipsISD::MFLO, DL, MVT::i32, MSub);
419    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), LoOut);
420  }
421  if (!SDValue(SUBENode, 0).use_empty()) {
422    SDValue HiOut = CurDAG->getNode(MipsISD::MFHI, DL, MVT::i32, MSub);
423    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), HiOut);
424  }
425
426  return true;
427}
428
429static SDValue performADDECombine(SDNode *N, SelectionDAG &DAG,
430                                  TargetLowering::DAGCombinerInfo &DCI,
431                                  const MipsSubtarget *Subtarget) {
432  if (DCI.isBeforeLegalize())
433    return SDValue();
434
435  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
436      selectMADD(N, &DAG))
437    return SDValue(N, 0);
438
439  return SDValue();
440}
441
442// Fold zero extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT
443//
444// Performs the following transformations:
445// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to zero extension if its
446//   sign/zero-extension is completely overwritten by the new one performed by
447//   the ISD::AND.
448// - Removes redundant zero extensions performed by an ISD::AND.
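//
// For illustration (made-up operands), assuming a v8i16 source vector $w:
//   (and (VEXTRACT_SEXT_ELT $w, $idx, i16), 65535)
// becomes
//   (VEXTRACT_ZEXT_ELT $w, $idx, i16)
// because the 0xFFFF mask re-does the extension from exactly 16 bits as a
// zero-extension.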
449static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
450                                 TargetLowering::DAGCombinerInfo &DCI,
451                                 const MipsSubtarget *Subtarget) {
452  if (!Subtarget->hasMSA())
453    return SDValue();
454
455  SDValue Op0 = N->getOperand(0);
456  SDValue Op1 = N->getOperand(1);
457  unsigned Op0Opcode = Op0->getOpcode();
458
459  // (and (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d)
460  // where $d + 1 == 2^n and n == 32
461  // or    $d + 1 == 2^n and n <= 32 and ZExt
462  // -> (MipsVExtractZExt $a, $b, $c)
463  if (Op0Opcode == MipsISD::VEXTRACT_SEXT_ELT ||
464      Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT) {
465    ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(Op1);
466
467    if (!Mask)
468      return SDValue();
469
470    int32_t Log2IfPositive = (Mask->getAPIntValue() + 1).exactLogBase2();
471
472    if (Log2IfPositive <= 0)
473      return SDValue(); // Mask+1 is not a power of 2
474
475    SDValue Op0Op2 = Op0->getOperand(2);
476    EVT ExtendTy = cast<VTSDNode>(Op0Op2)->getVT();
477    unsigned ExtendTySize = ExtendTy.getSizeInBits();
478    unsigned Log2 = Log2IfPositive;
479
480    if ((Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT && Log2 >= ExtendTySize) ||
481        Log2 == ExtendTySize) {
482      SDValue Ops[] = { Op0->getOperand(0), Op0->getOperand(1), Op0Op2 };
483      DAG.MorphNodeTo(Op0.getNode(), MipsISD::VEXTRACT_ZEXT_ELT,
484                      Op0->getVTList(), Ops, Op0->getNumOperands());
485      return Op0;
486    }
487  }
488
489  return SDValue();
490}
491
492// Determine if the specified node is a constant vector splat.
493//
494// Returns true and sets Imm if:
495// * N is a ISD::BUILD_VECTOR representing a constant splat
496//
497// This function is quite similar to MipsSEDAGToDAGISel::selectVSplat. The
498// differences are that it assumes MSA support has already been checked and
499// that the arbitrary requirement of a 32-bit maximum integer size isn't
500// applied (and must not be, in order for binsri.d to be selectable).
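//
// For illustration (made-up values): a v4i32 (build_vector 7, 7, 7, 7) sets
// Imm to 7 and returns true; a non-splat build_vector returns false.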
501static bool isVSplat(SDValue N, APInt &Imm, bool IsLittleEndian) {
502  BuildVectorSDNode *Node = dyn_cast<BuildVectorSDNode>(N.getNode());
503
504  if (Node == NULL)
505    return false;
506
507  APInt SplatValue, SplatUndef;
508  unsigned SplatBitSize;
509  bool HasAnyUndefs;
510
511  if (!Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
512                             8, !IsLittleEndian))
513    return false;
514
515  Imm = SplatValue;
516
517  return true;
518}
519
520// Test whether the given node is an all-ones build_vector.
521static bool isVectorAllOnes(SDValue N) {
522  // Look through bitcasts. Endianness doesn't matter because we are looking
523  // for an all-ones value.
524  if (N->getOpcode() == ISD::BITCAST)
525    N = N->getOperand(0);
526
527  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
528
529  if (!BVN)
530    return false;
531
532  APInt SplatValue, SplatUndef;
533  unsigned SplatBitSize;
534  bool HasAnyUndefs;
535
536  // Endianness doesn't matter in this context because we are looking for
537  // an all-ones value.
538  if (BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs))
539    return SplatValue.isAllOnesValue();
540
541  return false;
542}
543
544// Test whether N is the bitwise inverse of OfNode.
545static bool isBitwiseInverse(SDValue N, SDValue OfNode) {
546  if (N->getOpcode() != ISD::XOR)
547    return false;
548
549  if (isVectorAllOnes(N->getOperand(0)))
550    return N->getOperand(1) == OfNode;
551
552  if (isVectorAllOnes(N->getOperand(1)))
553    return N->getOperand(0) == OfNode;
554
555  return false;
556}
557
558// Perform combines where ISD::OR is the root node.
559//
560// Performs the following transformations:
561// - (or (and $a, $mask), (and $b, $inv_mask)) => (vselect $mask, $a, $b)
562//   where $inv_mask is the bitwise inverse of $mask and the 'or' has a 128-bit
563//   vector type.
564static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
565                                TargetLowering::DAGCombinerInfo &DCI,
566                                const MipsSubtarget *Subtarget) {
567  if (!Subtarget->hasMSA())
568    return SDValue();
569
570  EVT Ty = N->getValueType(0);
571
572  if (!Ty.is128BitVector())
573    return SDValue();
574
575  SDValue Op0 = N->getOperand(0);
576  SDValue Op1 = N->getOperand(1);
577
578  if (Op0->getOpcode() == ISD::AND && Op1->getOpcode() == ISD::AND) {
579    SDValue Op0Op0 = Op0->getOperand(0);
580    SDValue Op0Op1 = Op0->getOperand(1);
581    SDValue Op1Op0 = Op1->getOperand(0);
582    SDValue Op1Op1 = Op1->getOperand(1);
583    bool IsLittleEndian = !Subtarget->isLittle();
584
585    SDValue IfSet, IfClr, Cond;
586    bool IsConstantMask = false;
587    APInt Mask, InvMask;
588
589    // If Op0Op0 is an appropriate mask, try to find its inverse in either
590    // Op1Op0 or Op1Op1. Keep track of the Cond, IfSet, and IfClr nodes while
591    // looking.
592    // IfClr will be set if we find a valid match.
593    if (isVSplat(Op0Op0, Mask, IsLittleEndian)) {
594      Cond = Op0Op0;
595      IfSet = Op0Op1;
596
597      if (isVSplat(Op1Op0, InvMask, IsLittleEndian) && Mask == ~InvMask)
598        IfClr = Op1Op1;
599      else if (isVSplat(Op1Op1, InvMask, IsLittleEndian) && Mask == ~InvMask)
600        IfClr = Op1Op0;
601
602      IsConstantMask = true;
603    }
604
605    // If IfClr is not yet set, and Op0Op1 is an appropriate mask, try the same
606    // thing again using this mask.
607    // IfClr will be set if we find a valid match.
608    if (!IfClr.getNode() && isVSplat(Op0Op1, Mask, IsLittleEndian)) {
609      Cond = Op0Op1;
610      IfSet = Op0Op0;
611
612      if (isVSplat(Op1Op0, InvMask, IsLittleEndian) && Mask == ~InvMask)
613        IfClr = Op1Op1;
614      else if (isVSplat(Op1Op1, InvMask, IsLittleEndian) && Mask == ~InvMask)
615        IfClr = Op1Op0;
616
617      IsConstantMask = true;
618    }
619
620    // If IfClr is not yet set, try looking for a non-constant match.
621    // IfClr will be set if we find a valid match amongst the eight
622    // possibilities.
623    if (!IfClr.getNode()) {
624      if (isBitwiseInverse(Op0Op0, Op1Op0)) {
625        Cond = Op1Op0;
626        IfSet = Op1Op1;
627        IfClr = Op0Op1;
628      } else if (isBitwiseInverse(Op0Op1, Op1Op0)) {
629        Cond = Op1Op0;
630        IfSet = Op1Op1;
631        IfClr = Op0Op0;
632      } else if (isBitwiseInverse(Op0Op0, Op1Op1)) {
633        Cond = Op1Op1;
634        IfSet = Op1Op0;
635        IfClr = Op0Op1;
636      } else if (isBitwiseInverse(Op0Op1, Op1Op1)) {
637        Cond = Op1Op1;
638        IfSet = Op1Op0;
639        IfClr = Op0Op0;
640      } else if (isBitwiseInverse(Op1Op0, Op0Op0)) {
641        Cond = Op0Op0;
642        IfSet = Op0Op1;
643        IfClr = Op1Op1;
644      } else if (isBitwiseInverse(Op1Op1, Op0Op0)) {
645        Cond = Op0Op0;
646        IfSet = Op0Op1;
647        IfClr = Op1Op0;
648      } else if (isBitwiseInverse(Op1Op0, Op0Op1)) {
649        Cond = Op0Op1;
650        IfSet = Op0Op0;
651        IfClr = Op1Op1;
652      } else if (isBitwiseInverse(Op1Op1, Op0Op1)) {
653        Cond = Op0Op1;
654        IfSet = Op0Op0;
655        IfClr = Op1Op0;
656      }
657    }
658
659    // At this point, IfClr will be set if we have a valid match.
660    if (!IfClr.getNode())
661      return SDValue();
662
663    assert(Cond.getNode() && IfSet.getNode());
664
665    // Fold degenerate cases.
666    if (IsConstantMask) {
667      if (Mask.isAllOnesValue())
668        return IfSet;
669      else if (Mask == 0)
670        return IfClr;
671    }
672
673    // Transform the DAG into an equivalent VSELECT.
674    return DAG.getNode(ISD::VSELECT, SDLoc(N), Ty, Cond, IfClr, IfSet);
675  }
676
677  return SDValue();
678}
679
680static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG,
681                                  TargetLowering::DAGCombinerInfo &DCI,
682                                  const MipsSubtarget *Subtarget) {
683  if (DCI.isBeforeLegalize())
684    return SDValue();
685
686  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
687      selectMSUB(N, &DAG))
688    return SDValue(N, 0);
689
690  return SDValue();
691}
692
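// Multiply X by the constant C using shifts, adds, and subs.
//
// Worked example (for illustration only): C = 26 is decomposed as
//   26 = 32 - 6 = 32 - (4 + 2)
// so the emitted DAG is
//   (sub (shl X, 5), (add (shl X, 2), (shl X, 1))).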
693static SDValue genConstMult(SDValue X, uint64_t C, SDLoc DL, EVT VT,
694                            EVT ShiftTy, SelectionDAG &DAG) {
695  // Clear the upper (64 - VT.sizeInBits) bits.
696  C &= ((uint64_t)-1) >> (64 - VT.getSizeInBits());
697
698  // Return 0.
699  if (C == 0)
700    return DAG.getConstant(0, VT);
701
702  // Return x.
703  if (C == 1)
704    return X;
705
706  // If c is power of 2, return (shl x, log2(c)).
707  if (isPowerOf2_64(C))
708    return DAG.getNode(ISD::SHL, DL, VT, X,
709                       DAG.getConstant(Log2_64(C), ShiftTy));
710
711  unsigned Log2Ceil = Log2_64_Ceil(C);
712  uint64_t Floor = 1LL << Log2_64(C);
713  uint64_t Ceil = Log2Ceil == 64 ? 0LL : 1LL << Log2Ceil;
714
715  // If |c - floor_c| <= |c - ceil_c|,
716  // where floor_c = pow(2, floor(log2(c))) and ceil_c = pow(2, ceil(log2(c))),
717  // return (add constMult(x, floor_c), constMult(x, c - floor_c)).
718  if (C - Floor <= Ceil - C) {
719    SDValue Op0 = genConstMult(X, Floor, DL, VT, ShiftTy, DAG);
720    SDValue Op1 = genConstMult(X, C - Floor, DL, VT, ShiftTy, DAG);
721    return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
722  }
723
724  // If |c - floor_c| > |c - ceil_c|,
725  // return (sub constMult(x, ceil_c), constMult(x, ceil_c - c)).
726  SDValue Op0 = genConstMult(X, Ceil, DL, VT, ShiftTy, DAG);
727  SDValue Op1 = genConstMult(X, Ceil - C, DL, VT, ShiftTy, DAG);
728  return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
729}
730
731static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG,
732                                 const TargetLowering::DAGCombinerInfo &DCI,
733                                 const MipsSETargetLowering *TL) {
734  EVT VT = N->getValueType(0);
735
736  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
737    if (!VT.isVector())
738      return genConstMult(N->getOperand(0), C->getZExtValue(), SDLoc(N),
739                          VT, TL->getScalarShiftAmountTy(VT), DAG);
740
741  return SDValue(N, 0);
742}
743
744static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
745                                      SelectionDAG &DAG,
746                                      const MipsSubtarget *Subtarget) {
747  // See if this is a vector splat immediate node.
748  APInt SplatValue, SplatUndef;
749  unsigned SplatBitSize;
750  bool HasAnyUndefs;
751  unsigned EltSize = Ty.getVectorElementType().getSizeInBits();
752  BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
753
754  if (!BV ||
755      !BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
756                           EltSize, !Subtarget->isLittle()) ||
757      (SplatBitSize != EltSize) ||
758      (SplatValue.getZExtValue() >= EltSize))
759    return SDValue();
760
761  return DAG.getNode(Opc, SDLoc(N), Ty, N->getOperand(0),
762                     DAG.getConstant(SplatValue.getZExtValue(), MVT::i32));
763}
764
765static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
766                                 TargetLowering::DAGCombinerInfo &DCI,
767                                 const MipsSubtarget *Subtarget) {
768  EVT Ty = N->getValueType(0);
769
770  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
771    return SDValue();
772
773  return performDSPShiftCombine(MipsISD::SHLL_DSP, N, Ty, DAG, Subtarget);
774}
775
776// Fold sign-extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT for MSA and fold
777// constant splats into MipsISD::SHRA_DSP for DSPr2.
778//
779// Performs the following transformations:
780// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to sign extension if its
781//   sign/zero-extension is completely overwritten by the new one performed by
782//   the ISD::SRA and ISD::SHL nodes.
783// - Removes redundant sign extensions performed by an ISD::SRA and ISD::SHL
784//   sequence.
785//
786// See performDSPShiftCombine for more information about the transformation
787// used for DSPr2.
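//
// For illustration (made-up operands), assuming a v16i8 source vector $w:
//   (sra (shl (VEXTRACT_SEXT_ELT $w, $idx, i8), 24), 24)
// becomes
//   (VEXTRACT_SEXT_ELT $w, $idx, i8)
// because the shift amount plus the width of the extended type is 32.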
788static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
789                                 TargetLowering::DAGCombinerInfo &DCI,
790                                 const MipsSubtarget *Subtarget) {
791  EVT Ty = N->getValueType(0);
792
793  if (Subtarget->hasMSA()) {
794    SDValue Op0 = N->getOperand(0);
795    SDValue Op1 = N->getOperand(1);
796
797    // (sra (shl (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d), imm:$d)
798    // where $d + sizeof($c) == 32
799    // or    $d + sizeof($c) <= 32 and SExt
800    // -> (MipsVExtractSExt $a, $b, $c)
801    if (Op0->getOpcode() == ISD::SHL && Op1 == Op0->getOperand(1)) {
802      SDValue Op0Op0 = Op0->getOperand(0);
803      ConstantSDNode *ShAmount = dyn_cast<ConstantSDNode>(Op1);
804
805      if (!ShAmount)
806        return SDValue();
807
808      if (Op0Op0->getOpcode() != MipsISD::VEXTRACT_SEXT_ELT &&
809          Op0Op0->getOpcode() != MipsISD::VEXTRACT_ZEXT_ELT)
810        return SDValue();
811
812      EVT ExtendTy = cast<VTSDNode>(Op0Op0->getOperand(2))->getVT();
813      unsigned TotalBits = ShAmount->getZExtValue() + ExtendTy.getSizeInBits();
814
815      if (TotalBits == 32 ||
816          (Op0Op0->getOpcode() == MipsISD::VEXTRACT_SEXT_ELT &&
817           TotalBits <= 32)) {
818        SDValue Ops[] = { Op0Op0->getOperand(0), Op0Op0->getOperand(1),
819                          Op0Op0->getOperand(2) };
820        DAG.MorphNodeTo(Op0Op0.getNode(), MipsISD::VEXTRACT_SEXT_ELT,
821                        Op0Op0->getVTList(), Ops, Op0Op0->getNumOperands());
822        return Op0Op0;
823      }
824    }
825  }
826
827  if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget->hasDSPR2()))
828    return SDValue();
829
830  return performDSPShiftCombine(MipsISD::SHRA_DSP, N, Ty, DAG, Subtarget);
831}
832
833
834static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG,
835                                 TargetLowering::DAGCombinerInfo &DCI,
836                                 const MipsSubtarget *Subtarget) {
837  EVT Ty = N->getValueType(0);
838
839  if (((Ty != MVT::v2i16) || !Subtarget->hasDSPR2()) && (Ty != MVT::v4i8))
840    return SDValue();
841
842  return performDSPShiftCombine(MipsISD::SHRL_DSP, N, Ty, DAG, Subtarget);
843}
844
845static bool isLegalDSPCondCode(EVT Ty, ISD::CondCode CC) {
846  bool IsV216 = (Ty == MVT::v2i16);
847
848  switch (CC) {
849  case ISD::SETEQ:
850  case ISD::SETNE:  return true;
851  case ISD::SETLT:
852  case ISD::SETLE:
853  case ISD::SETGT:
854  case ISD::SETGE:  return IsV216;
855  case ISD::SETULT:
856  case ISD::SETULE:
857  case ISD::SETUGT:
858  case ISD::SETUGE: return !IsV216;
859  default:          return false;
860  }
861}
862
863static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
864  EVT Ty = N->getValueType(0);
865
866  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
867    return SDValue();
868
869  if (!isLegalDSPCondCode(Ty, cast<CondCodeSDNode>(N->getOperand(2))->get()))
870    return SDValue();
871
872  return DAG.getNode(MipsISD::SETCC_DSP, SDLoc(N), Ty, N->getOperand(0),
873                     N->getOperand(1), N->getOperand(2));
874}
875
876static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) {
877  EVT Ty = N->getValueType(0);
878
879  if (Ty.is128BitVector() && Ty.isInteger()) {
880    // Try the following combines:
881    //   (vselect (setcc $a, $b, SETLT), $b, $a)) -> (vsmax $a, $b)
882    //   (vselect (setcc $a, $b, SETLE), $b, $a)) -> (vsmax $a, $b)
883    //   (vselect (setcc $a, $b, SETLT), $a, $b)) -> (vsmin $a, $b)
884    //   (vselect (setcc $a, $b, SETLE), $a, $b)) -> (vsmin $a, $b)
885    //   (vselect (setcc $a, $b, SETULT), $b, $a)) -> (vumax $a, $b)
886    //   (vselect (setcc $a, $b, SETULE), $b, $a)) -> (vumax $a, $b)
887    //   (vselect (setcc $a, $b, SETULT), $a, $b)) -> (vumin $a, $b)
888    //   (vselect (setcc $a, $b, SETULE), $a, $b)) -> (vumin $a, $b)
889    // SETGT/SETGE/SETUGT/SETUGE variants of these will show up initially but
890    // will be expanded to equivalent SETLT/SETLE/SETULT/SETULE versions by the
891    // legalizer.
892    SDValue Op0 = N->getOperand(0);
893
894    if (Op0->getOpcode() != ISD::SETCC)
895      return SDValue();
896
897    ISD::CondCode CondCode = cast<CondCodeSDNode>(Op0->getOperand(2))->get();
898    bool Signed;
899
900    if (CondCode == ISD::SETLT  || CondCode == ISD::SETLE)
901      Signed = true;
902    else if (CondCode == ISD::SETULT || CondCode == ISD::SETULE)
903      Signed = false;
904    else
905      return SDValue();
906
907    SDValue Op1 = N->getOperand(1);
908    SDValue Op2 = N->getOperand(2);
909    SDValue Op0Op0 = Op0->getOperand(0);
910    SDValue Op0Op1 = Op0->getOperand(1);
911
912    if (Op1 == Op0Op0 && Op2 == Op0Op1)
913      return DAG.getNode(Signed ? MipsISD::VSMIN : MipsISD::VUMIN, SDLoc(N),
914                         Ty, Op1, Op2);
915    else if (Op1 == Op0Op1 && Op2 == Op0Op0)
916      return DAG.getNode(Signed ? MipsISD::VSMAX : MipsISD::VUMAX, SDLoc(N),
917                         Ty, Op1, Op2);
918  } else if ((Ty == MVT::v2i16) || (Ty == MVT::v4i8)) {
919    SDValue SetCC = N->getOperand(0);
920
921    if (SetCC.getOpcode() != MipsISD::SETCC_DSP)
922      return SDValue();
923
924    return DAG.getNode(MipsISD::SELECT_CC_DSP, SDLoc(N), Ty,
925                       SetCC.getOperand(0), SetCC.getOperand(1),
926                       N->getOperand(1), N->getOperand(2), SetCC.getOperand(2));
927  }
928
929  return SDValue();
930}
931
932static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
933                                 const MipsSubtarget *Subtarget) {
934  EVT Ty = N->getValueType(0);
935
936  if (Subtarget->hasMSA() && Ty.is128BitVector() && Ty.isInteger()) {
937    // Try the following combines:
938    //   (xor (or $a, $b), (build_vector allones))
939    //   (xor (or $a, $b), (bitcast (build_vector allones)))
940    SDValue Op0 = N->getOperand(0);
941    SDValue Op1 = N->getOperand(1);
942    SDValue NotOp;
943
944    if (ISD::isBuildVectorAllOnes(Op0.getNode()))
945      NotOp = Op1;
946    else if (ISD::isBuildVectorAllOnes(Op1.getNode()))
947      NotOp = Op0;
948    else
949      return SDValue();
950
951    if (NotOp->getOpcode() == ISD::OR)
952      return DAG.getNode(MipsISD::VNOR, SDLoc(N), Ty, NotOp->getOperand(0),
953                         NotOp->getOperand(1));
954  }
955
956  return SDValue();
957}
958
959SDValue
960MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
961  SelectionDAG &DAG = DCI.DAG;
962  SDValue Val;
963
964  switch (N->getOpcode()) {
965  case ISD::ADDE:
966    return performADDECombine(N, DAG, DCI, Subtarget);
967  case ISD::AND:
968    Val = performANDCombine(N, DAG, DCI, Subtarget);
969    break;
970  case ISD::OR:
971    Val = performORCombine(N, DAG, DCI, Subtarget);
972    break;
973  case ISD::SUBE:
974    return performSUBECombine(N, DAG, DCI, Subtarget);
975  case ISD::MUL:
976    return performMULCombine(N, DAG, DCI, this);
977  case ISD::SHL:
978    return performSHLCombine(N, DAG, DCI, Subtarget);
979  case ISD::SRA:
980    return performSRACombine(N, DAG, DCI, Subtarget);
981  case ISD::SRL:
982    return performSRLCombine(N, DAG, DCI, Subtarget);
983  case ISD::VSELECT:
984    return performVSELECTCombine(N, DAG);
985  case ISD::XOR:
986    Val = performXORCombine(N, DAG, Subtarget);
987    break;
988  case ISD::SETCC:
989    Val = performSETCCCombine(N, DAG);
990    break;
991  }
992
993  if (Val.getNode()) {
994    DEBUG(dbgs() << "\nMipsSE DAG Combine:\n";
995          N->printrWithDepth(dbgs(), &DAG);
996          dbgs() << "\n=> \n";
997          Val.getNode()->printrWithDepth(dbgs(), &DAG);
998          dbgs() << "\n");
999    return Val;
1000  }
1001
1002  return MipsTargetLowering::PerformDAGCombine(N, DCI);
1003}
1004
1005MachineBasicBlock *
1006MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
1007                                                  MachineBasicBlock *BB) const {
1008  switch (MI->getOpcode()) {
1009  default:
1010    return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
1011  case Mips::BPOSGE32_PSEUDO:
1012    return emitBPOSGE32(MI, BB);
1013  case Mips::SNZ_B_PSEUDO:
1014    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_B);
1015  case Mips::SNZ_H_PSEUDO:
1016    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_H);
1017  case Mips::SNZ_W_PSEUDO:
1018    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_W);
1019  case Mips::SNZ_D_PSEUDO:
1020    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_D);
1021  case Mips::SNZ_V_PSEUDO:
1022    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_V);
1023  case Mips::SZ_B_PSEUDO:
1024    return emitMSACBranchPseudo(MI, BB, Mips::BZ_B);
1025  case Mips::SZ_H_PSEUDO:
1026    return emitMSACBranchPseudo(MI, BB, Mips::BZ_H);
1027  case Mips::SZ_W_PSEUDO:
1028    return emitMSACBranchPseudo(MI, BB, Mips::BZ_W);
1029  case Mips::SZ_D_PSEUDO:
1030    return emitMSACBranchPseudo(MI, BB, Mips::BZ_D);
1031  case Mips::SZ_V_PSEUDO:
1032    return emitMSACBranchPseudo(MI, BB, Mips::BZ_V);
1033  case Mips::COPY_FW_PSEUDO:
1034    return emitCOPY_FW(MI, BB);
1035  case Mips::COPY_FD_PSEUDO:
1036    return emitCOPY_FD(MI, BB);
1037  case Mips::INSERT_FW_PSEUDO:
1038    return emitINSERT_FW(MI, BB);
1039  case Mips::INSERT_FD_PSEUDO:
1040    return emitINSERT_FD(MI, BB);
1041  case Mips::FILL_FW_PSEUDO:
1042    return emitFILL_FW(MI, BB);
1043  case Mips::FILL_FD_PSEUDO:
1044    return emitFILL_FD(MI, BB);
1045  case Mips::FEXP2_W_1_PSEUDO:
1046    return emitFEXP2_W_1(MI, BB);
1047  case Mips::FEXP2_D_1_PSEUDO:
1048    return emitFEXP2_D_1(MI, BB);
1049  }
1050}
1051
1052bool MipsSETargetLowering::
1053isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
1054                                  unsigned NextStackOffset,
1055                                  const MipsFunctionInfo& FI) const {
1056  if (!EnableMipsTailCalls)
1057    return false;
1058
1059  // Return false if either the callee or caller has a byval argument.
1060  if (MipsCCInfo.hasByValArg() || FI.hasByvalArg())
1061    return false;
1062
1063  // Return true if the callee's argument area is no larger than the
1064  // caller's.
1065  return NextStackOffset <= FI.getIncomingArgSize();
1066}
1067
1068void MipsSETargetLowering::
1069getOpndList(SmallVectorImpl<SDValue> &Ops,
1070            std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
1071            bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
1072            CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const {
1073  // T9 should contain the address of the callee function if
1074  // -relocation-model=pic or it is an indirect call.
1075  if (IsPICCall || !GlobalOrExternal) {
1076    unsigned T9Reg = IsN64 ? Mips::T9_64 : Mips::T9;
1077    RegsToPass.push_front(std::make_pair(T9Reg, Callee));
1078  } else
1079    Ops.push_back(Callee);
1080
1081  MipsTargetLowering::getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal,
1082                                  InternalLinkage, CLI, Callee, Chain);
1083}
1084
1085SDValue MipsSETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
1086  LoadSDNode &Nd = *cast<LoadSDNode>(Op);
1087
1088  if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
1089    return MipsTargetLowering::lowerLOAD(Op, DAG);
1090
1091  // Replace a double-precision load with two i32 loads and a BuildPairF64.
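  //
  // For illustration (made-up operands):
  //   (f64 (load $addr))
  // becomes
  //   (BuildPairF64 (i32 (load $addr)), (i32 (load $addr+4)))
  // with the two halves swapped on big-endian targets.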
1092  SDLoc DL(Op);
1093  SDValue Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
1094  EVT PtrVT = Ptr.getValueType();
1095
1096  // i32 load from lower address.
1097  SDValue Lo = DAG.getLoad(MVT::i32, DL, Chain, Ptr,
1098                           MachinePointerInfo(), Nd.isVolatile(),
1099                           Nd.isNonTemporal(), Nd.isInvariant(),
1100                           Nd.getAlignment());
1101
1102  // i32 load from higher address.
1103  Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
1104  SDValue Hi = DAG.getLoad(MVT::i32, DL, Lo.getValue(1), Ptr,
1105                           MachinePointerInfo(), Nd.isVolatile(),
1106                           Nd.isNonTemporal(), Nd.isInvariant(),
1107                           std::min(Nd.getAlignment(), 4U));
1108
1109  if (!Subtarget->isLittle())
1110    std::swap(Lo, Hi);
1111
1112  SDValue BP = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
1113  SDValue Ops[2] = {BP, Hi.getValue(1)};
1114  return DAG.getMergeValues(Ops, 2, DL);
1115}
1116
1117SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
1118  StoreSDNode &Nd = *cast<StoreSDNode>(Op);
1119
1120  if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
1121    return MipsTargetLowering::lowerSTORE(Op, DAG);
1122
1123  // Replace a double-precision store with two ExtractElementF64s and i32 stores.
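  //
  // For illustration (made-up operands):
  //   (store f64:$val, $addr)
  // becomes
  //   (store (ExtractElementF64 $val, 0), $addr)
  //   (store (ExtractElementF64 $val, 1), $addr+4)
  // with the two halves swapped on big-endian targets.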
1124  SDLoc DL(Op);
1125  SDValue Val = Nd.getValue(), Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
1126  EVT PtrVT = Ptr.getValueType();
1127  SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
1128                           Val, DAG.getConstant(0, MVT::i32));
1129  SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
1130                           Val, DAG.getConstant(1, MVT::i32));
1131
1132  if (!Subtarget->isLittle())
1133    std::swap(Lo, Hi);
1134
1135  // i32 store to lower address.
1136  Chain = DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(),
1137                       Nd.isVolatile(), Nd.isNonTemporal(), Nd.getAlignment(),
1138                       Nd.getTBAAInfo());
1139
1140  // i32 store to higher address.
1141  Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
1142  return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(),
1143                      Nd.isVolatile(), Nd.isNonTemporal(),
1144                      std::min(Nd.getAlignment(), 4U), Nd.getTBAAInfo());
1145}
1146
1147SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
1148                                          bool HasLo, bool HasHi,
1149                                          SelectionDAG &DAG) const {
1150  EVT Ty = Op.getOperand(0).getValueType();
1151  SDLoc DL(Op);
1152  SDValue Mult = DAG.getNode(NewOpc, DL, MVT::Untyped,
1153                             Op.getOperand(0), Op.getOperand(1));
1154  SDValue Lo, Hi;
1155
1156  if (HasLo)
1157    Lo = DAG.getNode(MipsISD::MFLO, DL, Ty, Mult);
1158  if (HasHi)
1159    Hi = DAG.getNode(MipsISD::MFHI, DL, Ty, Mult);
1160
1161  if (!HasLo || !HasHi)
1162    return HasLo ? Lo : Hi;
1163
1164  SDValue Vals[] = { Lo, Hi };
1165  return DAG.getMergeValues(Vals, 2, DL);
1166}
1167
1168
1169static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) {
1170  SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
1171                             DAG.getConstant(0, MVT::i32));
1172  SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
1173                             DAG.getConstant(1, MVT::i32));
1174  return DAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, InLo, InHi);
1175}
1176
1177static SDValue extractLOHI(SDValue Op, SDLoc DL, SelectionDAG &DAG) {
1178  SDValue Lo = DAG.getNode(MipsISD::MFLO, DL, MVT::i32, Op);
1179  SDValue Hi = DAG.getNode(MipsISD::MFHI, DL, MVT::i32, Op);
1180  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
1181}
1182
1183// This function expands mips intrinsic nodes which have 64-bit input operands
1184// or output values.
1185//
1186// out64 = intrinsic-node in64
1187// =>
1188// lo = copy (extract-element (in64, 0))
1189// hi = copy (extract-element (in64, 1))
1190// mips-specific-node
1191// v0 = copy lo
1192// v1 = copy hi
1193// out64 = merge-values (v0, v1)
1194//
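// For example, the 64-bit accumulator argument of a DSP intrinsic such as
// llvm.mips.madd is split into lo/hi halves with EXTRACT_ELEMENT, combined
// into an untyped accumulator with MTLOHI, and the 64-bit result is rebuilt
// from MFLO/MFHI (sketch only; see the code below for the exact sequence).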
1195static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1196  SDLoc DL(Op);
1197  bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other;
1198  SmallVector<SDValue, 3> Ops;
1199  unsigned OpNo = 0;
1200
1201  // See if Op has a chain input.
1202  if (HasChainIn)
1203    Ops.push_back(Op->getOperand(OpNo++));
1204
1205  // The next operand is the intrinsic opcode.
1206  assert(Op->getOperand(OpNo).getOpcode() == ISD::TargetConstant);
1207
1208  // See if the next operand has type i64.
1209  SDValue Opnd = Op->getOperand(++OpNo), In64;
1210
1211  if (Opnd.getValueType() == MVT::i64)
1212    In64 = initAccumulator(Opnd, DL, DAG);
1213  else
1214    Ops.push_back(Opnd);
1215
1216  // Push the remaining operands.
1217  for (++OpNo ; OpNo < Op->getNumOperands(); ++OpNo)
1218    Ops.push_back(Op->getOperand(OpNo));
1219
1220  // Add In64 to the end of the list.
1221  if (In64.getNode())
1222    Ops.push_back(In64);
1223
1224  // Scan output.
1225  SmallVector<EVT, 2> ResTys;
1226
1227  for (SDNode::value_iterator I = Op->value_begin(), E = Op->value_end();
1228       I != E; ++I)
1229    ResTys.push_back((*I == MVT::i64) ? MVT::Untyped : *I);
1230
1231  // Create node.
1232  SDValue Val = DAG.getNode(Opc, DL, ResTys, &Ops[0], Ops.size());
1233  SDValue Out = (ResTys[0] == MVT::Untyped) ? extractLOHI(Val, DL, DAG) : Val;
1234
1235  if (!HasChainIn)
1236    return Out;
1237
1238  assert(Val->getValueType(1) == MVT::Other);
1239  SDValue Vals[] = { Out, SDValue(Val.getNode(), 1) };
1240  return DAG.getMergeValues(Vals, 2, DL);
1241}
1242
1243// Lower an MSA copy intrinsic into the specified SelectionDAG node
1244static SDValue lowerMSACopyIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
1245  SDLoc DL(Op);
1246  SDValue Vec = Op->getOperand(1);
1247  SDValue Idx = Op->getOperand(2);
1248  EVT ResTy = Op->getValueType(0);
1249  EVT EltTy = Vec->getValueType(0).getVectorElementType();
1250
1251  SDValue Result = DAG.getNode(Opc, DL, ResTy, Vec, Idx,
1252                               DAG.getValueType(EltTy));
1253
1254  return Result;
1255}
1256
1257static SDValue lowerMSASplatZExt(SDValue Op, unsigned OpNr, SelectionDAG &DAG) {
1258  EVT ResVecTy = Op->getValueType(0);
1259  EVT ViaVecTy = ResVecTy;
1260  SDLoc DL(Op);
1261
1262  // When ResVecTy == MVT::v2i64, LaneA is the upper 32 bits of the lane and
1263  // LaneB is the lower 32 bits. Otherwise LaneA and LaneB are alternating
1264  // lanes.
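  //
  // For illustration (made-up operand $v): zero-extending a 32-bit value into
  // each v2i64 lane is done by building (v4i32 0, $v, 0, $v) and bitcasting
  // the result to v2i64.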
1265  SDValue LaneA;
1266  SDValue LaneB = Op->getOperand(2);
1267
1268  if (ResVecTy == MVT::v2i64) {
1269    LaneA = DAG.getConstant(0, MVT::i32);
1270    ViaVecTy = MVT::v4i32;
1271  } else
1272    LaneA = LaneB;
1273
1274  SDValue Ops[16] = { LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB,
1275                      LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB };
1276
1277  SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, Ops,
1278                               ViaVecTy.getVectorNumElements());
1279
1280  if (ViaVecTy != ResVecTy)
1281    Result = DAG.getNode(ISD::BITCAST, DL, ResVecTy, Result);
1282
1283  return Result;
1284}
1285
1286static SDValue lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG) {
1287  return DAG.getConstant(Op->getConstantOperandVal(ImmOp), Op->getValueType(0));
1288}
1289
1290static SDValue getBuildVectorSplat(EVT VecTy, SDValue SplatValue,
1291                                   bool BigEndian, SelectionDAG &DAG) {
1292  EVT ViaVecTy = VecTy;
1293  SDValue SplatValueA = SplatValue;
1294  SDValue SplatValueB = SplatValue;
1295  SDLoc DL(SplatValue);
1296
1297  if (VecTy == MVT::v2i64) {
1298    // v2i64 BUILD_VECTOR must be performed via v4i32 so split into i32's.
1299    ViaVecTy = MVT::v4i32;
1300
1301    SplatValueA = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValue);
1302    SplatValueB = DAG.getNode(ISD::SRL, DL, MVT::i64, SplatValue,
1303                              DAG.getConstant(32, MVT::i32));
1304    SplatValueB = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValueB);
1305  }
1306
1307  // We currently hold the parts in little endian order. Swap them if
1308  // necessary.
1309  if (BigEndian)
1310    std::swap(SplatValueA, SplatValueB);
1311
1312  SDValue Ops[16] = { SplatValueA, SplatValueB, SplatValueA, SplatValueB,
1313                      SplatValueA, SplatValueB, SplatValueA, SplatValueB,
1314                      SplatValueA, SplatValueB, SplatValueA, SplatValueB,
1315                      SplatValueA, SplatValueB, SplatValueA, SplatValueB };
1316
1317  SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, Ops,
1318                               ViaVecTy.getVectorNumElements());
1319
1320  if (VecTy != ViaVecTy)
1321    Result = DAG.getNode(ISD::BITCAST, DL, VecTy, Result);
1322
1323  return Result;
1324}
1325
1326static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG,
1327                                        unsigned Opc, SDValue Imm,
1328                                        bool BigEndian) {
1329  EVT VecTy = Op->getValueType(0);
1330  SDValue Exp2Imm;
1331  SDLoc DL(Op);
1332
1333  // The DAG Combiner can't constant fold bitcasted vectors yet, so we must
1334  // do it here for now.
1335  if (VecTy == MVT::v2i64) {
1336    if (ConstantSDNode *CImm = dyn_cast<ConstantSDNode>(Imm)) {
1337      APInt BitImm = APInt(64, 1) << CImm->getAPIntValue();
1338
1339      SDValue BitImmHiOp = DAG.getConstant(BitImm.lshr(32).trunc(32), MVT::i32);
1340      SDValue BitImmLoOp = DAG.getConstant(BitImm.trunc(32), MVT::i32);
1341
1342      if (BigEndian)
1343        std::swap(BitImmLoOp, BitImmHiOp);
1344
1345      Exp2Imm =
1346          DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
1347                      DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, BitImmLoOp,
1348                                  BitImmHiOp, BitImmLoOp, BitImmHiOp));
1349    }
1350  }
1351
1352  if (Exp2Imm.getNode() == NULL) {
1353    // We couldn't constant fold, so do a vector shift instead.
1354
1355    // Extend i32 to i64 if necessary. Sign or zero extend doesn't matter since
1356    // only values 0-63 are valid.
1357    if (VecTy == MVT::v2i64)
1358      Imm = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Imm);
1359
1360    Exp2Imm = getBuildVectorSplat(VecTy, Imm, BigEndian, DAG);
1361
1362    Exp2Imm =
1363        DAG.getNode(ISD::SHL, DL, VecTy, DAG.getConstant(1, VecTy), Exp2Imm);
1364  }
1365
1366  return DAG.getNode(Opc, DL, VecTy, Op->getOperand(1), Exp2Imm);
1367}
1368
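// Lower an MSA bit-clear intrinsic (bclr.[bhwd]): clear bit Op2 of every
// element of Op1, i.e. compute (and Op1, (xor (shl 1, Op2), all-ones)).
// This is a summary of the lowering below, using the operand numbering of the
// intrinsic node.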
1369static SDValue lowerMSABitClear(SDValue Op, SelectionDAG &DAG) {
1370  EVT ResTy = Op->getValueType(0);
1371  EVT ViaVecTy = ResTy == MVT::v2i64 ? MVT::v4i32 : ResTy;
1372  SDLoc DL(Op);
1373  SDValue One = DAG.getConstant(1, ResTy);
1374  SDValue Bit = DAG.getNode(ISD::SHL, DL, ResTy, One, Op->getOperand(2));
1375
1376  SDValue AllOnes = DAG.getConstant(-1, MVT::i32);
1377  SDValue AllOnesOperands[16] = { AllOnes, AllOnes, AllOnes, AllOnes,
1378                                  AllOnes, AllOnes, AllOnes, AllOnes,
1379                                  AllOnes, AllOnes, AllOnes, AllOnes,
1380                                  AllOnes, AllOnes, AllOnes, AllOnes };
1381  AllOnes = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, AllOnesOperands,
1382                        ViaVecTy.getVectorNumElements());
1383  if (ResTy != ViaVecTy)
1384    AllOnes = DAG.getNode(ISD::BITCAST, DL, ResTy, AllOnes);
1385
1386  Bit = DAG.getNode(ISD::XOR, DL, ResTy, Bit, AllOnes);
1387
1388  return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1), Bit);
1389}
1390
1391static SDValue lowerMSABitClearImm(SDValue Op, SelectionDAG &DAG) {
1392  SDLoc DL(Op);
1393  EVT ResTy = Op->getValueType(0);
1394  APInt BitImm = APInt(ResTy.getVectorElementType().getSizeInBits(), 1)
1395                 << cast<ConstantSDNode>(Op->getOperand(2))->getAPIntValue();
1396  SDValue BitMask = DAG.getConstant(~BitImm, ResTy);
1397
1398  return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1), BitMask);
1399}
1400
1401SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
1402                                                      SelectionDAG &DAG) const {
1403  SDLoc DL(Op);
1404
1405  switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) {
1406  default:
1407    return SDValue();
1408  case Intrinsic::mips_shilo:
1409    return lowerDSPIntr(Op, DAG, MipsISD::SHILO);
1410  case Intrinsic::mips_dpau_h_qbl:
1411    return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBL);
1412  case Intrinsic::mips_dpau_h_qbr:
1413    return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBR);
1414  case Intrinsic::mips_dpsu_h_qbl:
1415    return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBL);
1416  case Intrinsic::mips_dpsu_h_qbr:
1417    return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBR);
1418  case Intrinsic::mips_dpa_w_ph:
1419    return lowerDSPIntr(Op, DAG, MipsISD::DPA_W_PH);
1420  case Intrinsic::mips_dps_w_ph:
1421    return lowerDSPIntr(Op, DAG, MipsISD::DPS_W_PH);
1422  case Intrinsic::mips_dpax_w_ph:
1423    return lowerDSPIntr(Op, DAG, MipsISD::DPAX_W_PH);
1424  case Intrinsic::mips_dpsx_w_ph:
1425    return lowerDSPIntr(Op, DAG, MipsISD::DPSX_W_PH);
1426  case Intrinsic::mips_mulsa_w_ph:
1427    return lowerDSPIntr(Op, DAG, MipsISD::MULSA_W_PH);
1428  case Intrinsic::mips_mult:
1429    return lowerDSPIntr(Op, DAG, MipsISD::Mult);
1430  case Intrinsic::mips_multu:
1431    return lowerDSPIntr(Op, DAG, MipsISD::Multu);
1432  case Intrinsic::mips_madd:
1433    return lowerDSPIntr(Op, DAG, MipsISD::MAdd);
1434  case Intrinsic::mips_maddu:
1435    return lowerDSPIntr(Op, DAG, MipsISD::MAddu);
1436  case Intrinsic::mips_msub:
1437    return lowerDSPIntr(Op, DAG, MipsISD::MSub);
1438  case Intrinsic::mips_msubu:
1439    return lowerDSPIntr(Op, DAG, MipsISD::MSubu);
1440  case Intrinsic::mips_addv_b:
1441  case Intrinsic::mips_addv_h:
1442  case Intrinsic::mips_addv_w:
1443  case Intrinsic::mips_addv_d:
1444    return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1),
1445                       Op->getOperand(2));
1446  case Intrinsic::mips_addvi_b:
1447  case Intrinsic::mips_addvi_h:
1448  case Intrinsic::mips_addvi_w:
1449  case Intrinsic::mips_addvi_d:
1450    return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1),
1451                       lowerMSASplatImm(Op, 2, DAG));
1452  case Intrinsic::mips_and_v:
1453    return DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1),
1454                       Op->getOperand(2));
1455  case Intrinsic::mips_andi_b:
1456    return DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1),
1457                       lowerMSASplatImm(Op, 2, DAG));
1458  case Intrinsic::mips_bclr_b:
1459  case Intrinsic::mips_bclr_h:
1460  case Intrinsic::mips_bclr_w:
1461  case Intrinsic::mips_bclr_d:
1462    return lowerMSABitClear(Op, DAG);
1463  case Intrinsic::mips_bclri_b:
1464  case Intrinsic::mips_bclri_h:
1465  case Intrinsic::mips_bclri_w:
1466  case Intrinsic::mips_bclri_d:
1467    return lowerMSABitClearImm(Op, DAG);
1468  case Intrinsic::mips_binsli_b:
1469  case Intrinsic::mips_binsli_h:
1470  case Intrinsic::mips_binsli_w:
1471  case Intrinsic::mips_binsli_d: {
1472    EVT VecTy = Op->getValueType(0);
1473    EVT EltTy = VecTy.getVectorElementType();
1474    APInt Mask = APInt::getHighBitsSet(EltTy.getSizeInBits(),
1475                                       Op->getConstantOperandVal(3));
1476    return DAG.getNode(ISD::VSELECT, DL, VecTy,
1477                       DAG.getConstant(Mask, VecTy, true), Op->getOperand(1),
1478                       Op->getOperand(2));
1479  }
1480  case Intrinsic::mips_binsri_b:
1481  case Intrinsic::mips_binsri_h:
1482  case Intrinsic::mips_binsri_w:
1483  case Intrinsic::mips_binsri_d: {
1484    EVT VecTy = Op->getValueType(0);
1485    EVT EltTy = VecTy.getVectorElementType();
1486    APInt Mask = APInt::getLowBitsSet(EltTy.getSizeInBits(),
1487                                      Op->getConstantOperandVal(3));
1488    return DAG.getNode(ISD::VSELECT, DL, VecTy,
1489                       DAG.getConstant(Mask, VecTy, true), Op->getOperand(1),
1490                       Op->getOperand(2));
1491  }
1492  case Intrinsic::mips_bmnz_v:
1493    return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), Op->getOperand(3),
1494                       Op->getOperand(2), Op->getOperand(1));
1495  case Intrinsic::mips_bmnzi_b:
1496    return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
1497                       lowerMSASplatImm(Op, 3, DAG), Op->getOperand(2),
1498                       Op->getOperand(1));
1499  case Intrinsic::mips_bmz_v:
1500    return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), Op->getOperand(3),
1501                       Op->getOperand(1), Op->getOperand(2));
1502  case Intrinsic::mips_bmzi_b:
1503    return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
1504                       lowerMSASplatImm(Op, 3, DAG), Op->getOperand(1),
1505                       Op->getOperand(2));
1506  case Intrinsic::mips_bneg_b:
1507  case Intrinsic::mips_bneg_h:
1508  case Intrinsic::mips_bneg_w:
1509  case Intrinsic::mips_bneg_d: {
1510    EVT VecTy = Op->getValueType(0);
1511    SDValue One = DAG.getConstant(1, VecTy);
1512
1513    return DAG.getNode(ISD::XOR, DL, VecTy, Op->getOperand(1),
1514                       DAG.getNode(ISD::SHL, DL, VecTy, One,
1515                                   Op->getOperand(2)));
1516  }
1517  case Intrinsic::mips_bnegi_b:
1518  case Intrinsic::mips_bnegi_h:
1519  case Intrinsic::mips_bnegi_w:
1520  case Intrinsic::mips_bnegi_d:
1521    return lowerMSABinaryBitImmIntr(Op, DAG, ISD::XOR, Op->getOperand(2),
1522                                    !Subtarget->isLittle());
1523  case Intrinsic::mips_bnz_b:
1524  case Intrinsic::mips_bnz_h:
1525  case Intrinsic::mips_bnz_w:
1526  case Intrinsic::mips_bnz_d:
1527    return DAG.getNode(MipsISD::VALL_NONZERO, DL, Op->getValueType(0),
1528                       Op->getOperand(1));
1529  case Intrinsic::mips_bnz_v:
1530    return DAG.getNode(MipsISD::VANY_NONZERO, DL, Op->getValueType(0),
1531                       Op->getOperand(1));
1532  case Intrinsic::mips_bsel_v:
1533    return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
1534                       Op->getOperand(1), Op->getOperand(2),
1535                       Op->getOperand(3));
1536  case Intrinsic::mips_bseli_b:
1537    return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0),
1538                       Op->getOperand(1), Op->getOperand(2),
1539                       lowerMSASplatImm(Op, 3, DAG));
1540  case Intrinsic::mips_bset_b:
1541  case Intrinsic::mips_bset_h:
1542  case Intrinsic::mips_bset_w:
1543  case Intrinsic::mips_bset_d: {
1544    EVT VecTy = Op->getValueType(0);
1545    SDValue One = DAG.getConstant(1, VecTy);
1546
1547    return DAG.getNode(ISD::OR, DL, VecTy, Op->getOperand(1),
1548                       DAG.getNode(ISD::SHL, DL, VecTy, One,
1549                                   Op->getOperand(2)));
1550  }
1551  case Intrinsic::mips_bseti_b:
1552  case Intrinsic::mips_bseti_h:
1553  case Intrinsic::mips_bseti_w:
1554  case Intrinsic::mips_bseti_d:
1555    return lowerMSABinaryBitImmIntr(Op, DAG, ISD::OR, Op->getOperand(2),
1556                                    !Subtarget->isLittle());
1557  case Intrinsic::mips_bz_b:
1558  case Intrinsic::mips_bz_h:
1559  case Intrinsic::mips_bz_w:
1560  case Intrinsic::mips_bz_d:
1561    return DAG.getNode(MipsISD::VALL_ZERO, DL, Op->getValueType(0),
1562                       Op->getOperand(1));
1563  case Intrinsic::mips_bz_v:
1564    return DAG.getNode(MipsISD::VANY_ZERO, DL, Op->getValueType(0),
1565                       Op->getOperand(1));
1566  case Intrinsic::mips_ceq_b:
1567  case Intrinsic::mips_ceq_h:
1568  case Intrinsic::mips_ceq_w:
1569  case Intrinsic::mips_ceq_d:
1570    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1571                        Op->getOperand(2), ISD::SETEQ);
1572  case Intrinsic::mips_ceqi_b:
1573  case Intrinsic::mips_ceqi_h:
1574  case Intrinsic::mips_ceqi_w:
1575  case Intrinsic::mips_ceqi_d:
1576    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1577                        lowerMSASplatImm(Op, 2, DAG), ISD::SETEQ);
1578  case Intrinsic::mips_cle_s_b:
1579  case Intrinsic::mips_cle_s_h:
1580  case Intrinsic::mips_cle_s_w:
1581  case Intrinsic::mips_cle_s_d:
1582    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1583                        Op->getOperand(2), ISD::SETLE);
1584  case Intrinsic::mips_clei_s_b:
1585  case Intrinsic::mips_clei_s_h:
1586  case Intrinsic::mips_clei_s_w:
1587  case Intrinsic::mips_clei_s_d:
1588    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1589                        lowerMSASplatImm(Op, 2, DAG), ISD::SETLE);
1590  case Intrinsic::mips_cle_u_b:
1591  case Intrinsic::mips_cle_u_h:
1592  case Intrinsic::mips_cle_u_w:
1593  case Intrinsic::mips_cle_u_d:
1594    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1595                        Op->getOperand(2), ISD::SETULE);
1596  case Intrinsic::mips_clei_u_b:
1597  case Intrinsic::mips_clei_u_h:
1598  case Intrinsic::mips_clei_u_w:
1599  case Intrinsic::mips_clei_u_d:
1600    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1601                        lowerMSASplatImm(Op, 2, DAG), ISD::SETULE);
1602  case Intrinsic::mips_clt_s_b:
1603  case Intrinsic::mips_clt_s_h:
1604  case Intrinsic::mips_clt_s_w:
1605  case Intrinsic::mips_clt_s_d:
1606    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1607                        Op->getOperand(2), ISD::SETLT);
1608  case Intrinsic::mips_clti_s_b:
1609  case Intrinsic::mips_clti_s_h:
1610  case Intrinsic::mips_clti_s_w:
1611  case Intrinsic::mips_clti_s_d:
1612    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1613                        lowerMSASplatImm(Op, 2, DAG), ISD::SETLT);
1614  case Intrinsic::mips_clt_u_b:
1615  case Intrinsic::mips_clt_u_h:
1616  case Intrinsic::mips_clt_u_w:
1617  case Intrinsic::mips_clt_u_d:
1618    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1619                        Op->getOperand(2), ISD::SETULT);
1620  case Intrinsic::mips_clti_u_b:
1621  case Intrinsic::mips_clti_u_h:
1622  case Intrinsic::mips_clti_u_w:
1623  case Intrinsic::mips_clti_u_d:
1624    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1625                        lowerMSASplatImm(Op, 2, DAG), ISD::SETULT);
1626  case Intrinsic::mips_copy_s_b:
1627  case Intrinsic::mips_copy_s_h:
1628  case Intrinsic::mips_copy_s_w:
1629    return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT);
1630  case Intrinsic::mips_copy_s_d:
1631    // Don't lower directly into VEXTRACT_SEXT_ELT since i64 might be illegal.
1632    // Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type
1633    // legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
1634    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
1635                       Op->getOperand(1), Op->getOperand(2));
1636  case Intrinsic::mips_copy_u_b:
1637  case Intrinsic::mips_copy_u_h:
1638  case Intrinsic::mips_copy_u_w:
1639    return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT);
1640  case Intrinsic::mips_copy_u_d:
1641    // Don't lower directly into VEXTRACT_ZEXT_ELT since i64 might be illegal.
1642    // Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type
1643    // legalizer and EXTRACT_VECTOR_ELT lowering sort it out.
1644    //
1645    // Note: When i64 is illegal, this results in copy_s.w instructions instead
1646    // of copy_u.w instructions. This makes no difference to the behaviour
1647    // since i64 is only illegal when the register file is 32-bit.
1648    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
1649                       Op->getOperand(1), Op->getOperand(2));
1650  case Intrinsic::mips_div_s_b:
1651  case Intrinsic::mips_div_s_h:
1652  case Intrinsic::mips_div_s_w:
1653  case Intrinsic::mips_div_s_d:
1654    return DAG.getNode(ISD::SDIV, DL, Op->getValueType(0), Op->getOperand(1),
1655                       Op->getOperand(2));
1656  case Intrinsic::mips_div_u_b:
1657  case Intrinsic::mips_div_u_h:
1658  case Intrinsic::mips_div_u_w:
1659  case Intrinsic::mips_div_u_d:
1660    return DAG.getNode(ISD::UDIV, DL, Op->getValueType(0), Op->getOperand(1),
1661                       Op->getOperand(2));
1662  case Intrinsic::mips_fadd_w:
1663  case Intrinsic::mips_fadd_d:
1664    return DAG.getNode(ISD::FADD, DL, Op->getValueType(0), Op->getOperand(1),
1665                       Op->getOperand(2));
1666  // Don't lower mips_fcaf_[wd] since LLVM folds SETFALSE condcodes away
1667  case Intrinsic::mips_fceq_w:
1668  case Intrinsic::mips_fceq_d:
1669    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1670                        Op->getOperand(2), ISD::SETOEQ);
1671  case Intrinsic::mips_fcle_w:
1672  case Intrinsic::mips_fcle_d:
1673    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1674                        Op->getOperand(2), ISD::SETOLE);
1675  case Intrinsic::mips_fclt_w:
1676  case Intrinsic::mips_fclt_d:
1677    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1678                        Op->getOperand(2), ISD::SETOLT);
1679  case Intrinsic::mips_fcne_w:
1680  case Intrinsic::mips_fcne_d:
1681    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1682                        Op->getOperand(2), ISD::SETONE);
1683  case Intrinsic::mips_fcor_w:
1684  case Intrinsic::mips_fcor_d:
1685    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1686                        Op->getOperand(2), ISD::SETO);
1687  case Intrinsic::mips_fcueq_w:
1688  case Intrinsic::mips_fcueq_d:
1689    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1690                        Op->getOperand(2), ISD::SETUEQ);
1691  case Intrinsic::mips_fcule_w:
1692  case Intrinsic::mips_fcule_d:
1693    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1694                        Op->getOperand(2), ISD::SETULE);
1695  case Intrinsic::mips_fcult_w:
1696  case Intrinsic::mips_fcult_d:
1697    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1698                        Op->getOperand(2), ISD::SETULT);
1699  case Intrinsic::mips_fcun_w:
1700  case Intrinsic::mips_fcun_d:
1701    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1702                        Op->getOperand(2), ISD::SETUO);
1703  case Intrinsic::mips_fcune_w:
1704  case Intrinsic::mips_fcune_d:
1705    return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
1706                        Op->getOperand(2), ISD::SETUNE);
1707  case Intrinsic::mips_fdiv_w:
1708  case Intrinsic::mips_fdiv_d:
1709    return DAG.getNode(ISD::FDIV, DL, Op->getValueType(0), Op->getOperand(1),
1710                       Op->getOperand(2));
1711  case Intrinsic::mips_ffint_u_w:
1712  case Intrinsic::mips_ffint_u_d:
1713    return DAG.getNode(ISD::UINT_TO_FP, DL, Op->getValueType(0),
1714                       Op->getOperand(1));
1715  case Intrinsic::mips_ffint_s_w:
1716  case Intrinsic::mips_ffint_s_d:
1717    return DAG.getNode(ISD::SINT_TO_FP, DL, Op->getValueType(0),
1718                       Op->getOperand(1));
1719  case Intrinsic::mips_fill_b:
1720  case Intrinsic::mips_fill_h:
1721  case Intrinsic::mips_fill_w:
1722  case Intrinsic::mips_fill_d: {
1723    SmallVector<SDValue, 16> Ops;
1724    EVT ResTy = Op->getValueType(0);
1725
1726    for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i)
1727      Ops.push_back(Op->getOperand(1));
1728
1729    // If ResTy is v2i64 then the type legalizer will break this node down into
1730    // an equivalent v4i32.
1731    return DAG.getNode(ISD::BUILD_VECTOR, DL, ResTy, &Ops[0], Ops.size());
1732  }
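  // fexp2.[wd] scales the first operand by two raised to the power of the
  // second, so the expansion below is FMUL(Operand 1, FEXP2(Operand 2)).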
1733  case Intrinsic::mips_fexp2_w:
1734  case Intrinsic::mips_fexp2_d: {
1735    EVT ResTy = Op->getValueType(0);
1736    return DAG.getNode(
1737        ISD::FMUL, SDLoc(Op), ResTy, Op->getOperand(1),
1738        DAG.getNode(ISD::FEXP2, SDLoc(Op), ResTy, Op->getOperand(2)));
1739  }
1740  case Intrinsic::mips_flog2_w:
1741  case Intrinsic::mips_flog2_d:
1742    return DAG.getNode(ISD::FLOG2, DL, Op->getValueType(0), Op->getOperand(1));
1743  case Intrinsic::mips_fmadd_w:
1744  case Intrinsic::mips_fmadd_d:
1745    return DAG.getNode(ISD::FMA, SDLoc(Op), Op->getValueType(0),
1746                       Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
1747  case Intrinsic::mips_fmul_w:
1748  case Intrinsic::mips_fmul_d:
1749    return DAG.getNode(ISD::FMUL, DL, Op->getValueType(0), Op->getOperand(1),
1750                       Op->getOperand(2));
1751  case Intrinsic::mips_fmsub_w:
1752  case Intrinsic::mips_fmsub_d: {
1753    EVT ResTy = Op->getValueType(0);
1754    return DAG.getNode(ISD::FSUB, SDLoc(Op), ResTy, Op->getOperand(1),
1755                       DAG.getNode(ISD::FMUL, SDLoc(Op), ResTy,
1756                                   Op->getOperand(2), Op->getOperand(3)));
1757  }
1758  case Intrinsic::mips_frint_w:
1759  case Intrinsic::mips_frint_d:
1760    return DAG.getNode(ISD::FRINT, DL, Op->getValueType(0), Op->getOperand(1));
1761  case Intrinsic::mips_fsqrt_w:
1762  case Intrinsic::mips_fsqrt_d:
1763    return DAG.getNode(ISD::FSQRT, DL, Op->getValueType(0), Op->getOperand(1));
1764  case Intrinsic::mips_fsub_w:
1765  case Intrinsic::mips_fsub_d:
1766    return DAG.getNode(ISD::FSUB, DL, Op->getValueType(0), Op->getOperand(1),
1767                       Op->getOperand(2));
1768  case Intrinsic::mips_ftrunc_u_w:
1769  case Intrinsic::mips_ftrunc_u_d:
1770    return DAG.getNode(ISD::FP_TO_UINT, DL, Op->getValueType(0),
1771                       Op->getOperand(1));
1772  case Intrinsic::mips_ftrunc_s_w:
1773  case Intrinsic::mips_ftrunc_s_d:
1774    return DAG.getNode(ISD::FP_TO_SINT, DL, Op->getValueType(0),
1775                       Op->getOperand(1));
1776  case Intrinsic::mips_ilvev_b:
1777  case Intrinsic::mips_ilvev_h:
1778  case Intrinsic::mips_ilvev_w:
1779  case Intrinsic::mips_ilvev_d:
1780    return DAG.getNode(MipsISD::ILVEV, DL, Op->getValueType(0),
1781                       Op->getOperand(1), Op->getOperand(2));
1782  case Intrinsic::mips_ilvl_b:
1783  case Intrinsic::mips_ilvl_h:
1784  case Intrinsic::mips_ilvl_w:
1785  case Intrinsic::mips_ilvl_d:
1786    return DAG.getNode(MipsISD::ILVL, DL, Op->getValueType(0),
1787                       Op->getOperand(1), Op->getOperand(2));
1788  case Intrinsic::mips_ilvod_b:
1789  case Intrinsic::mips_ilvod_h:
1790  case Intrinsic::mips_ilvod_w:
1791  case Intrinsic::mips_ilvod_d:
1792    return DAG.getNode(MipsISD::ILVOD, DL, Op->getValueType(0),
1793                       Op->getOperand(1), Op->getOperand(2));
1794  case Intrinsic::mips_ilvr_b:
1795  case Intrinsic::mips_ilvr_h:
1796  case Intrinsic::mips_ilvr_w:
1797  case Intrinsic::mips_ilvr_d:
1798    return DAG.getNode(MipsISD::ILVR, DL, Op->getValueType(0),
1799                       Op->getOperand(1), Op->getOperand(2));
1800  case Intrinsic::mips_insert_b:
1801  case Intrinsic::mips_insert_h:
1802  case Intrinsic::mips_insert_w:
1803  case Intrinsic::mips_insert_d:
1804    return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0),
1805                       Op->getOperand(1), Op->getOperand(3), Op->getOperand(2));
1806  case Intrinsic::mips_ldi_b:
1807  case Intrinsic::mips_ldi_h:
1808  case Intrinsic::mips_ldi_w:
1809  case Intrinsic::mips_ldi_d:
1810    return lowerMSASplatImm(Op, 1, DAG);
1811  case Intrinsic::mips_lsa: {
1812    EVT ResTy = Op->getValueType(0);
1813    return DAG.getNode(ISD::ADD, SDLoc(Op), ResTy, Op->getOperand(1),
1814                       DAG.getNode(ISD::SHL, SDLoc(Op), ResTy,
1815                                   Op->getOperand(2), Op->getOperand(3)));
1816  }
1817  case Intrinsic::mips_maddv_b:
1818  case Intrinsic::mips_maddv_h:
1819  case Intrinsic::mips_maddv_w:
1820  case Intrinsic::mips_maddv_d: {
1821    EVT ResTy = Op->getValueType(0);
1822    return DAG.getNode(ISD::ADD, SDLoc(Op), ResTy, Op->getOperand(1),
1823                       DAG.getNode(ISD::MUL, SDLoc(Op), ResTy,
1824                                   Op->getOperand(2), Op->getOperand(3)));
1825  }
1826  case Intrinsic::mips_max_s_b:
1827  case Intrinsic::mips_max_s_h:
1828  case Intrinsic::mips_max_s_w:
1829  case Intrinsic::mips_max_s_d:
1830    return DAG.getNode(MipsISD::VSMAX, DL, Op->getValueType(0),
1831                       Op->getOperand(1), Op->getOperand(2));
1832  case Intrinsic::mips_max_u_b:
1833  case Intrinsic::mips_max_u_h:
1834  case Intrinsic::mips_max_u_w:
1835  case Intrinsic::mips_max_u_d:
1836    return DAG.getNode(MipsISD::VUMAX, DL, Op->getValueType(0),
1837                       Op->getOperand(1), Op->getOperand(2));
1838  case Intrinsic::mips_maxi_s_b:
1839  case Intrinsic::mips_maxi_s_h:
1840  case Intrinsic::mips_maxi_s_w:
1841  case Intrinsic::mips_maxi_s_d:
1842    return DAG.getNode(MipsISD::VSMAX, DL, Op->getValueType(0),
1843                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1844  case Intrinsic::mips_maxi_u_b:
1845  case Intrinsic::mips_maxi_u_h:
1846  case Intrinsic::mips_maxi_u_w:
1847  case Intrinsic::mips_maxi_u_d:
1848    return DAG.getNode(MipsISD::VUMAX, DL, Op->getValueType(0),
1849                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1850  case Intrinsic::mips_min_s_b:
1851  case Intrinsic::mips_min_s_h:
1852  case Intrinsic::mips_min_s_w:
1853  case Intrinsic::mips_min_s_d:
1854    return DAG.getNode(MipsISD::VSMIN, DL, Op->getValueType(0),
1855                       Op->getOperand(1), Op->getOperand(2));
1856  case Intrinsic::mips_min_u_b:
1857  case Intrinsic::mips_min_u_h:
1858  case Intrinsic::mips_min_u_w:
1859  case Intrinsic::mips_min_u_d:
1860    return DAG.getNode(MipsISD::VUMIN, DL, Op->getValueType(0),
1861                       Op->getOperand(1), Op->getOperand(2));
1862  case Intrinsic::mips_mini_s_b:
1863  case Intrinsic::mips_mini_s_h:
1864  case Intrinsic::mips_mini_s_w:
1865  case Intrinsic::mips_mini_s_d:
1866    return DAG.getNode(MipsISD::VSMIN, DL, Op->getValueType(0),
1867                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1868  case Intrinsic::mips_mini_u_b:
1869  case Intrinsic::mips_mini_u_h:
1870  case Intrinsic::mips_mini_u_w:
1871  case Intrinsic::mips_mini_u_d:
1872    return DAG.getNode(MipsISD::VUMIN, DL, Op->getValueType(0),
1873                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1874  case Intrinsic::mips_mod_s_b:
1875  case Intrinsic::mips_mod_s_h:
1876  case Intrinsic::mips_mod_s_w:
1877  case Intrinsic::mips_mod_s_d:
1878    return DAG.getNode(ISD::SREM, DL, Op->getValueType(0), Op->getOperand(1),
1879                       Op->getOperand(2));
1880  case Intrinsic::mips_mod_u_b:
1881  case Intrinsic::mips_mod_u_h:
1882  case Intrinsic::mips_mod_u_w:
1883  case Intrinsic::mips_mod_u_d:
1884    return DAG.getNode(ISD::UREM, DL, Op->getValueType(0), Op->getOperand(1),
1885                       Op->getOperand(2));
1886  case Intrinsic::mips_mulv_b:
1887  case Intrinsic::mips_mulv_h:
1888  case Intrinsic::mips_mulv_w:
1889  case Intrinsic::mips_mulv_d:
1890    return DAG.getNode(ISD::MUL, DL, Op->getValueType(0), Op->getOperand(1),
1891                       Op->getOperand(2));
1892  case Intrinsic::mips_msubv_b:
1893  case Intrinsic::mips_msubv_h:
1894  case Intrinsic::mips_msubv_w:
1895  case Intrinsic::mips_msubv_d: {
1896    EVT ResTy = Op->getValueType(0);
1897    return DAG.getNode(ISD::SUB, SDLoc(Op), ResTy, Op->getOperand(1),
1898                       DAG.getNode(ISD::MUL, SDLoc(Op), ResTy,
1899                                   Op->getOperand(2), Op->getOperand(3)));
1900  }
1901  case Intrinsic::mips_nlzc_b:
1902  case Intrinsic::mips_nlzc_h:
1903  case Intrinsic::mips_nlzc_w:
1904  case Intrinsic::mips_nlzc_d:
1905    return DAG.getNode(ISD::CTLZ, DL, Op->getValueType(0), Op->getOperand(1));
1906  case Intrinsic::mips_nor_v: {
1907    SDValue Res = DAG.getNode(ISD::OR, DL, Op->getValueType(0),
1908                              Op->getOperand(1), Op->getOperand(2));
1909    return DAG.getNOT(DL, Res, Res->getValueType(0));
1910  }
1911  case Intrinsic::mips_nori_b: {
1912    SDValue Res = DAG.getNode(ISD::OR, DL, Op->getValueType(0),
1913                              Op->getOperand(1),
1914                              lowerMSASplatImm(Op, 2, DAG));
1915    return DAG.getNOT(DL, Res, Res->getValueType(0));
1916  }
1917  case Intrinsic::mips_or_v:
1918    return DAG.getNode(ISD::OR, DL, Op->getValueType(0), Op->getOperand(1),
1919                       Op->getOperand(2));
1920  case Intrinsic::mips_ori_b:
1921    return DAG.getNode(ISD::OR, DL, Op->getValueType(0),
1922                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1923  case Intrinsic::mips_pckev_b:
1924  case Intrinsic::mips_pckev_h:
1925  case Intrinsic::mips_pckev_w:
1926  case Intrinsic::mips_pckev_d:
1927    return DAG.getNode(MipsISD::PCKEV, DL, Op->getValueType(0),
1928                       Op->getOperand(1), Op->getOperand(2));
1929  case Intrinsic::mips_pckod_b:
1930  case Intrinsic::mips_pckod_h:
1931  case Intrinsic::mips_pckod_w:
1932  case Intrinsic::mips_pckod_d:
1933    return DAG.getNode(MipsISD::PCKOD, DL, Op->getValueType(0),
1934                       Op->getOperand(1), Op->getOperand(2));
1935  case Intrinsic::mips_pcnt_b:
1936  case Intrinsic::mips_pcnt_h:
1937  case Intrinsic::mips_pcnt_w:
1938  case Intrinsic::mips_pcnt_d:
1939    return DAG.getNode(ISD::CTPOP, DL, Op->getValueType(0), Op->getOperand(1));
1940  case Intrinsic::mips_shf_b:
1941  case Intrinsic::mips_shf_h:
1942  case Intrinsic::mips_shf_w:
1943    return DAG.getNode(MipsISD::SHF, DL, Op->getValueType(0),
1944                       Op->getOperand(2), Op->getOperand(1));
1945  case Intrinsic::mips_sll_b:
1946  case Intrinsic::mips_sll_h:
1947  case Intrinsic::mips_sll_w:
1948  case Intrinsic::mips_sll_d:
1949    return DAG.getNode(ISD::SHL, DL, Op->getValueType(0), Op->getOperand(1),
1950                       Op->getOperand(2));
1951  case Intrinsic::mips_slli_b:
1952  case Intrinsic::mips_slli_h:
1953  case Intrinsic::mips_slli_w:
1954  case Intrinsic::mips_slli_d:
1955    return DAG.getNode(ISD::SHL, DL, Op->getValueType(0),
1956                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1957  case Intrinsic::mips_splat_b:
1958  case Intrinsic::mips_splat_h:
1959  case Intrinsic::mips_splat_w:
1960  case Intrinsic::mips_splat_d:
1961    // We can't lower via VECTOR_SHUFFLE because it requires constant shuffle
1962    // masks, nor can we lower via BUILD_VECTOR & EXTRACT_VECTOR_ELT because
1963    // EXTRACT_VECTOR_ELT can't extract i64 values on MIPS32.
1964    // Instead we lower to MipsISD::VSHF and match from there.
1965    return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0),
1966                       lowerMSASplatZExt(Op, 2, DAG), Op->getOperand(1),
1967                       Op->getOperand(1));
1968  case Intrinsic::mips_splati_b:
1969  case Intrinsic::mips_splati_h:
1970  case Intrinsic::mips_splati_w:
1971  case Intrinsic::mips_splati_d:
1972    return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0),
1973                       lowerMSASplatImm(Op, 2, DAG), Op->getOperand(1),
1974                       Op->getOperand(1));
1975  case Intrinsic::mips_sra_b:
1976  case Intrinsic::mips_sra_h:
1977  case Intrinsic::mips_sra_w:
1978  case Intrinsic::mips_sra_d:
1979    return DAG.getNode(ISD::SRA, DL, Op->getValueType(0), Op->getOperand(1),
1980                       Op->getOperand(2));
1981  case Intrinsic::mips_srai_b:
1982  case Intrinsic::mips_srai_h:
1983  case Intrinsic::mips_srai_w:
1984  case Intrinsic::mips_srai_d:
1985    return DAG.getNode(ISD::SRA, DL, Op->getValueType(0),
1986                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1987  case Intrinsic::mips_srl_b:
1988  case Intrinsic::mips_srl_h:
1989  case Intrinsic::mips_srl_w:
1990  case Intrinsic::mips_srl_d:
1991    return DAG.getNode(ISD::SRL, DL, Op->getValueType(0), Op->getOperand(1),
1992                       Op->getOperand(2));
1993  case Intrinsic::mips_srli_b:
1994  case Intrinsic::mips_srli_h:
1995  case Intrinsic::mips_srli_w:
1996  case Intrinsic::mips_srli_d:
1997    return DAG.getNode(ISD::SRL, DL, Op->getValueType(0),
1998                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
1999  case Intrinsic::mips_subv_b:
2000  case Intrinsic::mips_subv_h:
2001  case Intrinsic::mips_subv_w:
2002  case Intrinsic::mips_subv_d:
2003    return DAG.getNode(ISD::SUB, DL, Op->getValueType(0), Op->getOperand(1),
2004                       Op->getOperand(2));
2005  case Intrinsic::mips_subvi_b:
2006  case Intrinsic::mips_subvi_h:
2007  case Intrinsic::mips_subvi_w:
2008  case Intrinsic::mips_subvi_d:
2009    return DAG.getNode(ISD::SUB, DL, Op->getValueType(0),
2010                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
2011  case Intrinsic::mips_vshf_b:
2012  case Intrinsic::mips_vshf_h:
2013  case Intrinsic::mips_vshf_w:
2014  case Intrinsic::mips_vshf_d:
2015    return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0),
2016                       Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
2017  case Intrinsic::mips_xor_v:
2018    return DAG.getNode(ISD::XOR, DL, Op->getValueType(0), Op->getOperand(1),
2019                       Op->getOperand(2));
2020  case Intrinsic::mips_xori_b:
2021    return DAG.getNode(ISD::XOR, DL, Op->getValueType(0),
2022                       Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG));
2023  }
2024}
2025
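// Lower an MSA load intrinsic (ld.[bhwd]) to an ordinary 16-byte-aligned
// vector load from (address + offset). For example (illustrative): ld.w with
// an offset of 32 becomes a v4i32 load of the memory at address + 32.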
2026static SDValue lowerMSALoadIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
2027  SDLoc DL(Op);
2028  SDValue ChainIn = Op->getOperand(0);
2029  SDValue Address = Op->getOperand(2);
2030  SDValue Offset  = Op->getOperand(3);
2031  EVT ResTy = Op->getValueType(0);
2032  EVT PtrTy = Address->getValueType(0);
2033
2034  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
2035
2036  return DAG.getLoad(ResTy, DL, ChainIn, Address, MachinePointerInfo(), false,
2037                     false, false, 16);
2038}
2039
2040SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
2041                                                     SelectionDAG &DAG) const {
2042  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
2043  switch (Intr) {
2044  default:
2045    return SDValue();
2046  case Intrinsic::mips_extp:
2047    return lowerDSPIntr(Op, DAG, MipsISD::EXTP);
2048  case Intrinsic::mips_extpdp:
2049    return lowerDSPIntr(Op, DAG, MipsISD::EXTPDP);
2050  case Intrinsic::mips_extr_w:
2051    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_W);
2052  case Intrinsic::mips_extr_r_w:
2053    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_R_W);
2054  case Intrinsic::mips_extr_rs_w:
2055    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_RS_W);
2056  case Intrinsic::mips_extr_s_h:
2057    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_S_H);
2058  case Intrinsic::mips_mthlip:
2059    return lowerDSPIntr(Op, DAG, MipsISD::MTHLIP);
2060  case Intrinsic::mips_mulsaq_s_w_ph:
2061    return lowerDSPIntr(Op, DAG, MipsISD::MULSAQ_S_W_PH);
2062  case Intrinsic::mips_maq_s_w_phl:
2063    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHL);
2064  case Intrinsic::mips_maq_s_w_phr:
2065    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHR);
2066  case Intrinsic::mips_maq_sa_w_phl:
2067    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHL);
2068  case Intrinsic::mips_maq_sa_w_phr:
2069    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHR);
2070  case Intrinsic::mips_dpaq_s_w_ph:
2071    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_S_W_PH);
2072  case Intrinsic::mips_dpsq_s_w_ph:
2073    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_S_W_PH);
2074  case Intrinsic::mips_dpaq_sa_l_w:
2075    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_SA_L_W);
2076  case Intrinsic::mips_dpsq_sa_l_w:
2077    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_SA_L_W);
2078  case Intrinsic::mips_dpaqx_s_w_ph:
2079    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_S_W_PH);
2080  case Intrinsic::mips_dpaqx_sa_w_ph:
2081    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_SA_W_PH);
2082  case Intrinsic::mips_dpsqx_s_w_ph:
2083    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH);
2084  case Intrinsic::mips_dpsqx_sa_w_ph:
2085    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH);
2086  case Intrinsic::mips_ld_b:
2087  case Intrinsic::mips_ld_h:
2088  case Intrinsic::mips_ld_w:
2089  case Intrinsic::mips_ld_d:
2090    return lowerMSALoadIntr(Op, DAG, Intr);
2091  }
2092}
2093
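// Lower an MSA store intrinsic (st.[bhwd]) to an ordinary 16-byte-aligned
// store of the value operand to (address + offset).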
2094static SDValue lowerMSAStoreIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
2095  SDLoc DL(Op);
2096  SDValue ChainIn = Op->getOperand(0);
2097  SDValue Value   = Op->getOperand(2);
2098  SDValue Address = Op->getOperand(3);
2099  SDValue Offset  = Op->getOperand(4);
2100  EVT PtrTy = Address->getValueType(0);
2101
2102  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
2103
2104  return DAG.getStore(ChainIn, DL, Value, Address, MachinePointerInfo(), false,
2105                      false, 16);
2106}
2107
2108SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op,
2109                                                  SelectionDAG &DAG) const {
2110  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
2111  switch (Intr) {
2112  default:
2113    return SDValue();
2114  case Intrinsic::mips_st_b:
2115  case Intrinsic::mips_st_h:
2116  case Intrinsic::mips_st_w:
2117  case Intrinsic::mips_st_d:
2118    return lowerMSAStoreIntr(Op, DAG, Intr);
2119  }
2120}
2121
2122/// \brief Check if the given BuildVectorSDNode is a splat.
2123/// This method currently relies on DAG nodes being reused when equivalent,
2124/// so it's possible for this to return false even when isConstantSplat returns
2125/// true.
2126static bool isSplatVector(const BuildVectorSDNode *N) {
2127  unsigned int nOps = N->getNumOperands();
2128  assert(nOps > 1 && "isSplatVector has 0 or 1 sized build vector");
2129
2130  SDValue Operand0 = N->getOperand(0);
2131
2132  for (unsigned int i = 1; i < nOps; ++i) {
2133    if (N->getOperand(i) != Operand0)
2134      return false;
2135  }
2136
2137  return true;
2138}
2139
2140// Lower ISD::EXTRACT_VECTOR_ELT into MipsISD::VEXTRACT_SEXT_ELT.
2141//
2142// The non-value bits resulting from ISD::EXTRACT_VECTOR_ELT are undefined. We
2143// choose to sign-extend but we could have equally chosen zero-extend. The
2144// DAGCombiner will fold any sign/zero extension of the ISD::EXTRACT_VECTOR_ELT
2145// result into this node later (possibly changing it to a zero-extend in the
2146// process).
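// For example (illustrative): (i32 (extract_vector_elt v8i16:$ws, $idx)) is
// rebuilt as (VEXTRACT_SEXT_ELT $ws, $idx, i16), and any later sign or zero
// extension of that i32 result can be folded into the node.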
2147SDValue MipsSETargetLowering::
2148lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
2149  SDLoc DL(Op);
2150  EVT ResTy = Op->getValueType(0);
2151  SDValue Op0 = Op->getOperand(0);
2152  EVT VecTy = Op0->getValueType(0);
2153
2154  if (!VecTy.is128BitVector())
2155    return SDValue();
2156
2157  if (ResTy.isInteger()) {
2158    SDValue Op1 = Op->getOperand(1);
2159    EVT EltTy = VecTy.getVectorElementType();
2160    return DAG.getNode(MipsISD::VEXTRACT_SEXT_ELT, DL, ResTy, Op0, Op1,
2161                       DAG.getValueType(EltTy));
2162  }
2163
2164  return Op;
2165}
2166
2167static bool isConstantOrUndef(const SDValue Op) {
2168  if (Op->getOpcode() == ISD::UNDEF)
2169    return true;
2170  if (isa<ConstantSDNode>(Op))
2171    return true;
2172  if (isa<ConstantFPSDNode>(Op))
2173    return true;
2174  return false;
2175}
2176
2177static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op) {
2178  for (unsigned i = 0; i < Op->getNumOperands(); ++i)
2179    if (isConstantOrUndef(Op->getOperand(i)))
2180      return true;
2181  return false;
2182}
2183
2184// Lowers ISD::BUILD_VECTOR into appropriate SelectionDAG nodes for the
2185// backend.
2186//
2187// Lowers according to the following rules:
2188// - Constant splats are legal as-is as long as the SplatBitSize is a power of
2189//   2 less than or equal to 64 and the value fits into a signed 10-bit
2190//   immediate
2191// - Constant splats are lowered to bitconverted BUILD_VECTORs if SplatBitSize
2192//   is a power of 2 less than or equal to 64 and the value does not fit into a
2193//   signed 10-bit immediate
2194// - Non-constant splats are legal as-is.
2195// - Non-constant non-splats are lowered to sequences of INSERT_VECTOR_ELT.
2196// - All others are illegal and must be expanded.
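// For example (illustrative): a v8i16 splat of 7 fits into a simm10 and is
// returned unchanged (it can be matched to ldi.h), whereas a v16i8 vector
// whose bytes form a repeating two-byte pattern that does not fit a simm10 is
// materialized as a v8i16 constant BUILD_VECTOR and bitcast back to v16i8.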
2197SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op,
2198                                                SelectionDAG &DAG) const {
2199  BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op);
2200  EVT ResTy = Op->getValueType(0);
2201  SDLoc DL(Op);
2202  APInt SplatValue, SplatUndef;
2203  unsigned SplatBitSize;
2204  bool HasAnyUndefs;
2205
2206  if (!Subtarget->hasMSA() || !ResTy.is128BitVector())
2207    return SDValue();
2208
2209  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
2210                            HasAnyUndefs, 8,
2211                            !Subtarget->isLittle()) && SplatBitSize <= 64) {
2212    // We can only cope with 8, 16, 32, or 64-bit elements
2213    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
2214        SplatBitSize != 64)
2215      return SDValue();
2216
2217    // If the value fits into a simm10 then we can use ldi.[bhwd]
2218    if (SplatValue.isSignedIntN(10))
2219      return Op;
2220
2221    EVT ViaVecTy;
2222
2223    switch (SplatBitSize) {
2224    default:
2225      return SDValue();
2226    case 8:
2227      ViaVecTy = MVT::v16i8;
2228      break;
2229    case 16:
2230      ViaVecTy = MVT::v8i16;
2231      break;
2232    case 32:
2233      ViaVecTy = MVT::v4i32;
2234      break;
2235    case 64:
2236      // There's no fill.d to fall back on for 64-bit values
2237      return SDValue();
2238    }
2239
2240    // SelectionDAG::getConstant will promote SplatValue appropriately.
2241    SDValue Result = DAG.getConstant(SplatValue, ViaVecTy);
2242
2243    // Bitcast to the type we originally wanted
2244    if (ViaVecTy != ResTy)
2245      Result = DAG.getNode(ISD::BITCAST, SDLoc(Node), ResTy, Result);
2246
2247    return Result;
2248  } else if (isSplatVector(Node))
2249    return Op;
2250  else if (!isConstantOrUndefBUILD_VECTOR(Node)) {
2251    // Use INSERT_VECTOR_ELT operations rather than expanding to stores.
2252    // The resulting code is the same length as the expansion, but it doesn't
2253    // use memory operations.
2254    EVT ResTy = Node->getValueType(0);
2255
2256    assert(ResTy.isVector());
2257
2258    unsigned NumElts = ResTy.getVectorNumElements();
2259    SDValue Vector = DAG.getUNDEF(ResTy);
2260    for (unsigned i = 0; i < NumElts; ++i) {
2261      Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
2262                           Node->getOperand(i),
2263                           DAG.getConstant(i, MVT::i32));
2264    }
2265    return Vector;
2266  }
2267
2268  return SDValue();
2269}
2270
2271// Lower VECTOR_SHUFFLE into SHF (if possible).
2272//
2273// SHF splits the vector into blocks of four elements, then shuffles these
2274// elements according to a <4 x i2> constant (encoded as an integer immediate).
2275//
2276// It is therefore possible to lower into SHF when the mask takes the form:
2277//   <a, b, c, d, a+4, b+4, c+4, d+4, a+8, b+8, c+8, d+8, ...>
2278// When undefs appear they are treated as if they were whatever value is
2279// necessary in order to fit the above form.
2280//
2281// For example:
2282//   %2 = shufflevector <8 x i16> %0, <8 x i16> undef,
2283//                      <8 x i32> <i32 3, i32 2, i32 1, i32 0,
2284//                                 i32 7, i32 6, i32 5, i32 4>
2285// is lowered to:
2286//   (SHF_H $w0, $w1, 27)
2287// where the 27 comes from:
2288//   3 + (2 << 2) + (1 << 4) + (0 << 6)
2289static SDValue lowerVECTOR_SHUFFLE_SHF(SDValue Op, EVT ResTy,
2290                                       SmallVector<int, 16> Indices,
2291                                       SelectionDAG &DAG) {
2292  int SHFIndices[4] = { -1, -1, -1, -1 };
2293
2294  if (Indices.size() < 4)
2295    return SDValue();
2296
2297  for (unsigned i = 0; i < 4; ++i) {
2298    for (unsigned j = i; j < Indices.size(); j += 4) {
2299      int Idx = Indices[j];
2300
2301      // Convert from vector index to 4-element subvector index
2302      // If an index refers to an element outside of the subvector then give up
2303      if (Idx != -1) {
2304        Idx -= 4 * (j / 4);
2305        if (Idx < 0 || Idx >= 4)
2306          return SDValue();
2307      }
2308
2309      // If the mask has an undef, replace it with the current index.
2310      // Note that it might still be undef if the current index is also undef
2311      if (SHFIndices[i] == -1)
2312        SHFIndices[i] = Idx;
2313
2314      // Check that non-undef values are the same as in the mask. If they
2315      // aren't then give up
2316      if (!(Idx == -1 || Idx == SHFIndices[i]))
2317        return SDValue();
2318    }
2319  }
2320
2321  // Calculate the immediate. Replace any remaining undefs with zero
2322  APInt Imm(32, 0);
2323  for (int i = 3; i >= 0; --i) {
2324    int Idx = SHFIndices[i];
2325
2326    if (Idx == -1)
2327      Idx = 0;
2328
2329    Imm <<= 2;
2330    Imm |= Idx & 0x3;
2331  }
2332
2333  return DAG.getNode(MipsISD::SHF, SDLoc(Op), ResTy,
2334                     DAG.getConstant(Imm, MVT::i32), Op->getOperand(0));
2335}
2336
2337// Lower VECTOR_SHUFFLE into ILVEV (if possible).
2338//
2339// ILVEV interleaves the even elements from each vector.
2340//
2341// It is possible to lower into ILVEV when the mask takes the form:
2342//   <0, n, 2, n+2, 4, n+4, ...>
2343// where n is the number of elements in the vector.
2344//
2345// When undefs appear in the mask they are treated as if they were whatever
2346// value is necessary in order to fit the above form.
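// For example (illustrative), a v8i16 shuffle (n is 8) with the mask
//   <0, 8, 2, 10, 4, 12, 6, 14>
// fits this form and is lowered to a single MipsISD::ILVEV node.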
2347static SDValue lowerVECTOR_SHUFFLE_ILVEV(SDValue Op, EVT ResTy,
2348                                         SmallVector<int, 16> Indices,
2349                                         SelectionDAG &DAG) {
2350  assert((Indices.size() % 2) == 0);
2351  int WsIdx = 0;
2352  int WtIdx = ResTy.getVectorNumElements();
2353
2354  for (unsigned i = 0; i < Indices.size(); i += 2) {
2355    if (Indices[i] != -1 && Indices[i] != WsIdx)
2356      return SDValue();
2357    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
2358      return SDValue();
2359    WsIdx += 2;
2360    WtIdx += 2;
2361  }
2362
2363  return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), ResTy, Op->getOperand(0),
2364                     Op->getOperand(1));
2365}
2366
2367// Lower VECTOR_SHUFFLE into ILVOD (if possible).
2368//
2369// ILVOD interleaves the odd elements from each vector.
2370//
2371// It is possible to lower into ILVOD when the mask takes the form:
2372//   <1, n+1, 3, n+3, 5, n+5, ...>
2373// where n is the number of elements in the vector.
2374//
2375// When undefs appear in the mask they are treated as if they were whatever
2376// value is necessary in order to fit the above form.
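// For example (illustrative), a v8i16 shuffle (n is 8) with the mask
//   <1, 9, 3, 11, 5, 13, 7, 15>
// fits this form and is lowered to a single MipsISD::ILVOD node.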
2377static SDValue lowerVECTOR_SHUFFLE_ILVOD(SDValue Op, EVT ResTy,
2378                                         SmallVector<int, 16> Indices,
2379                                         SelectionDAG &DAG) {
2380  assert((Indices.size() % 2) == 0);
2381  int WsIdx = 1;
2382  int WtIdx = ResTy.getVectorNumElements() + 1;
2383
2384  for (unsigned i = 0; i < Indices.size(); i += 2) {
2385    if (Indices[i] != -1 && Indices[i] != WsIdx)
2386      return SDValue();
2387    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
2388      return SDValue();
2389    WsIdx += 2;
2390    WtIdx += 2;
2391  }
2392
2393  return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), ResTy, Op->getOperand(0),
2394                     Op->getOperand(1));
2395}
2396
2397// Lower VECTOR_SHUFFLE into ILVL (if possible).
2398//
2399// ILVL interleaves consecutive elements from the left half of each vector.
2400//
2401// It is possible to lower into ILVL when the mask takes the form:
2402//   <0, n, 1, n+1, 2, n+2, ...>
2403// where n is the number of elements in the vector.
2404//
2405// When undefs appear in the mask they are treated as if they were whatever
2406// value is necessary in order to fit the above form.
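// For example (illustrative), a v8i16 shuffle (n is 8) with the mask
//   <0, 8, 1, 9, 2, 10, 3, 11>
// fits this form and is lowered to a single MipsISD::ILVL node.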
2407static SDValue lowerVECTOR_SHUFFLE_ILVL(SDValue Op, EVT ResTy,
2408                                        SmallVector<int, 16> Indices,
2409                                        SelectionDAG &DAG) {
2410  assert((Indices.size() % 2) == 0);
2411  int WsIdx = 0;
2412  int WtIdx = ResTy.getVectorNumElements();
2413
2414  for (unsigned i = 0; i < Indices.size(); i += 2) {
2415    if (Indices[i] != -1 && Indices[i] != WsIdx)
2416      return SDValue();
2417    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
2418      return SDValue();
2419    WsIdx++;
2420    WtIdx++;
2421  }
2422
2423  return DAG.getNode(MipsISD::ILVL, SDLoc(Op), ResTy, Op->getOperand(0),
2424                     Op->getOperand(1));
2425}
2426
2427// Lower VECTOR_SHUFFLE into ILVR (if possible).
2428//
2429// ILVR interleaves consecutive elements from the right half of each vector.
2430//
2431// It is possible to lower into ILVR when the mask takes the form:
2432//   <x, n+x, x+1, n+x+1, x+2, n+x+2, ...>
2433// where n is the number of elements in the vector and x is half n.
2434//
2435// When undefs appear in the mask they are treated as if they were whatever
2436// value is necessary in order to fit the above form.
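// For example (illustrative), a v8i16 shuffle (n is 8, x is 4) with the mask
//   <4, 12, 5, 13, 6, 14, 7, 15>
// fits this form and is lowered to a single MipsISD::ILVR node.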
2437static SDValue lowerVECTOR_SHUFFLE_ILVR(SDValue Op, EVT ResTy,
2438                                        SmallVector<int, 16> Indices,
2439                                        SelectionDAG &DAG) {
2440  assert((Indices.size() % 2) == 0);
2441  unsigned NumElts = ResTy.getVectorNumElements();
2442  int WsIdx = NumElts / 2;
2443  int WtIdx = NumElts + NumElts / 2;
2444
2445  for (unsigned i = 0; i < Indices.size(); i += 2) {
2446    if (Indices[i] != -1 && Indices[i] != WsIdx)
2447      return SDValue();
2448    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
2449      return SDValue();
2450    WsIdx++;
2451    WtIdx++;
2452  }
2453
2454  return DAG.getNode(MipsISD::ILVR, SDLoc(Op), ResTy, Op->getOperand(0),
2455                     Op->getOperand(1));
2456}
2457
2458// Lower VECTOR_SHUFFLE into PCKEV (if possible).
2459//
2460// PCKEV copies the even elements of each vector into the result vector.
2461//
2462// It is possible to lower into PCKEV when the mask takes the form:
2463//   <0, 2, 4, ..., n, n+2, n+4, ...>
2464// where n is the number of elements in the vector.
2465//
2466// When undefs appear in the mask they are treated as if they were whatever
2467// value is necessary in order to fit the above form.
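// For example (illustrative), a v8i16 shuffle (n is 8) with the mask
//   <0, 2, 4, 6, 8, 10, 12, 14>
// fits this form and is lowered to a single MipsISD::PCKEV node.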
2468static SDValue lowerVECTOR_SHUFFLE_PCKEV(SDValue Op, EVT ResTy,
2469                                         SmallVector<int, 16> Indices,
2470                                         SelectionDAG &DAG) {
2471  assert((Indices.size() % 2) == 0);
2472  int Idx = 0;
2473
2474  for (unsigned i = 0; i < Indices.size(); ++i) {
2475    if (Indices[i] != -1 && Indices[i] != Idx)
2476      return SDValue();
2477    Idx += 2;
2478  }
2479
2480  return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), ResTy, Op->getOperand(0),
2481                     Op->getOperand(1));
2482}
2483
2484// Lower VECTOR_SHUFFLE into PCKOD (if possible).
2485//
2486// PCKOD copies the odd elements of each vector into the result vector.
2487//
2488// It is possible to lower into PCKOD when the mask takes the form:
2489//   <1, 3, 5, ..., n+1, n+3, n+5, ...>
2490// where n is the number of elements in the vector.
2491//
2492// When undefs appear in the mask they are treated as if they were whatever
2493// value is necessary in order to fit the above form.
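// For example (illustrative), a v8i16 shuffle (n is 8) with the mask
//   <1, 3, 5, 7, 9, 11, 13, 15>
// fits this form and is lowered to a single MipsISD::PCKOD node.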
2494static SDValue lowerVECTOR_SHUFFLE_PCKOD(SDValue Op, EVT ResTy,
2495                                         SmallVector<int, 16> Indices,
2496                                         SelectionDAG &DAG) {
2497  assert((Indices.size() % 2) == 0);
2498  int Idx = 1;
2499
2500  for (unsigned i = 0; i < Indices.size(); ++i) {
2501    if (Indices[i] != -1 && Indices[i] != Idx)
2502      return SDValue();
2503    Idx += 2;
2504  }
2505
2506  return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), ResTy, Op->getOperand(0),
2507                     Op->getOperand(1));
2508}
2509
2510// Lower VECTOR_SHUFFLE into VSHF.
2511//
2512// This mostly consists of converting the shuffle indices in Indices into a
2513// BUILD_VECTOR and adding it as an operand to the resulting VSHF. There is
2514// also code to eliminate unused operands of the VECTOR_SHUFFLE. For example,
2515// if the type is v8i16 and all the indices are less than 8 then the second
2516// operand is unused and can be replaced with anything. We choose to replace it
2517// with the used operand since this reduces the number of instructions overall.
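// For example (illustrative): a v8i16 shuffle whose mask is
//   <1, 3, 5, 7, 0, 2, 4, 6>
// only references the first input, so the VSHF is built with that input in
// both operand positions and the unused second input is never read.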
2518static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
2519                                        SmallVector<int, 16> Indices,
2520                                        SelectionDAG &DAG) {
2521  SmallVector<SDValue, 16> Ops;
2522  SDValue Op0;
2523  SDValue Op1;
2524  EVT MaskVecTy = ResTy.changeVectorElementTypeToInteger();
2525  EVT MaskEltTy = MaskVecTy.getVectorElementType();
2526  bool Using1stVec = false;
2527  bool Using2ndVec = false;
2528  SDLoc DL(Op);
2529  int ResTyNumElts = ResTy.getVectorNumElements();
2530
2531  for (int i = 0; i < ResTyNumElts; ++i) {
2532    // Idx == -1 means UNDEF
2533    int Idx = Indices[i];
2534
2535    if (0 <= Idx && Idx < ResTyNumElts)
2536      Using1stVec = true;
2537    if (ResTyNumElts <= Idx && Idx < ResTyNumElts * 2)
2538      Using2ndVec = true;
2539  }
2540
2541  for (SmallVector<int, 16>::iterator I = Indices.begin(); I != Indices.end();
2542       ++I)
2543    Ops.push_back(DAG.getTargetConstant(*I, MaskEltTy));
2544
2545  SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, &Ops[0],
2546                                Ops.size());
2547
2548  if (Using1stVec && Using2ndVec) {
2549    Op0 = Op->getOperand(0);
2550    Op1 = Op->getOperand(1);
2551  } else if (Using1stVec)
2552    Op0 = Op1 = Op->getOperand(0);
2553  else if (Using2ndVec)
2554    Op0 = Op1 = Op->getOperand(1);
2555  else
2556    llvm_unreachable("shuffle vector mask references neither vector operand?");
2557
2558  return DAG.getNode(MipsISD::VSHF, DL, ResTy, MaskVec, Op0, Op1);
2559}
2560
2561// Lower VECTOR_SHUFFLE into one of a number of instructions depending on the
2562// indices in the shuffle.
2563SDValue MipsSETargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
2564                                                  SelectionDAG &DAG) const {
2565  ShuffleVectorSDNode *Node = cast<ShuffleVectorSDNode>(Op);
2566  EVT ResTy = Op->getValueType(0);
2567
2568  if (!ResTy.is128BitVector())
2569    return SDValue();
2570
2571  int ResTyNumElts = ResTy.getVectorNumElements();
2572  SmallVector<int, 16> Indices;
2573
2574  for (int i = 0; i < ResTyNumElts; ++i)
2575    Indices.push_back(Node->getMaskElt(i));
2576
2577  SDValue Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG);
2578  if (Result.getNode())
2579    return Result;
2580  Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG);
2581  if (Result.getNode())
2582    return Result;
2583  Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG);
2584  if (Result.getNode())
2585    return Result;
2586  Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG);
2587  if (Result.getNode())
2588    return Result;
2589  Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG);
2590  if (Result.getNode())
2591    return Result;
2592  Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG);
2593  if (Result.getNode())
2594    return Result;
2595  Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG);
2596  if (Result.getNode())
2597    return Result;
2598  return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
2599}
2600
2601MachineBasicBlock *MipsSETargetLowering::
2602emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const {
2603  // $bb:
2604  //  bposge32_pseudo $vr0
2605  //  =>
2606  // $bb:
2607  //  bposge32 $tbb
2608  // $fbb:
2609  //  li $vr2, 0
2610  //  b $sink
2611  // $tbb:
2612  //  li $vr1, 1
2613  // $sink:
2614  //  $vr0 = phi($vr2, $fbb, $vr1, $tbb)
2615
2616  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2617  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2618  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
2619  DebugLoc DL = MI->getDebugLoc();
2620  const BasicBlock *LLVM_BB = BB->getBasicBlock();
2621  MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
2622  MachineFunction *F = BB->getParent();
2623  MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
2624  MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
2625  MachineBasicBlock *Sink  = F->CreateMachineBasicBlock(LLVM_BB);
2626  F->insert(It, FBB);
2627  F->insert(It, TBB);
2628  F->insert(It, Sink);
2629
2630  // Transfer the remainder of BB and its successor edges to Sink.
2631  Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
2632               BB->end());
2633  Sink->transferSuccessorsAndUpdatePHIs(BB);
2634
2635  // Add successors.
2636  BB->addSuccessor(FBB);
2637  BB->addSuccessor(TBB);
2638  FBB->addSuccessor(Sink);
2639  TBB->addSuccessor(Sink);
2640
2641  // Insert the real bposge32 instruction to $BB.
2642  BuildMI(BB, DL, TII->get(Mips::BPOSGE32)).addMBB(TBB);
2643
2644  // Fill $FBB.
2645  unsigned VR2 = RegInfo.createVirtualRegister(RC);
2646  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), VR2)
2647    .addReg(Mips::ZERO).addImm(0);
2648  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
2649
2650  // Fill $TBB.
2651  unsigned VR1 = RegInfo.createVirtualRegister(RC);
2652  BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), VR1)
2653    .addReg(Mips::ZERO).addImm(1);
2654
2655  // Insert phi function to $Sink.
2656  BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
2657          MI->getOperand(0).getReg())
2658    .addReg(VR2).addMBB(FBB).addReg(VR1).addMBB(TBB);
2659
2660  MI->eraseFromParent();   // The pseudo instruction is gone now.
2661  return Sink;
2662}
2663
2664MachineBasicBlock *MipsSETargetLowering::
2665emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
2666                     unsigned BranchOp) const {
2667  // $bb:
2668  //  vany_nonzero $rd, $ws
2669  //  =>
2670  // $bb:
2671  //  bnz.b $ws, $tbb
2672  //  b $fbb
2673  // $fbb:
2674  //  li $rd1, 0
2675  //  b $sink
2676  // $tbb:
2677  //  li $rd2, 1
2678  // $sink:
2679  //  $rd = phi($rd1, $fbb, $rd2, $tbb)
2680
2681  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2682  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2683  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
2684  DebugLoc DL = MI->getDebugLoc();
2685  const BasicBlock *LLVM_BB = BB->getBasicBlock();
2686  MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
2687  MachineFunction *F = BB->getParent();
2688  MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
2689  MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
2690  MachineBasicBlock *Sink  = F->CreateMachineBasicBlock(LLVM_BB);
2691  F->insert(It, FBB);
2692  F->insert(It, TBB);
2693  F->insert(It, Sink);
2694
2695  // Transfer the remainder of BB and its successor edges to Sink.
2696  Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
2697               BB->end());
2698  Sink->transferSuccessorsAndUpdatePHIs(BB);
2699
2700  // Add successors.
2701  BB->addSuccessor(FBB);
2702  BB->addSuccessor(TBB);
2703  FBB->addSuccessor(Sink);
2704  TBB->addSuccessor(Sink);
2705
2706  // Insert the real bnz.b instruction to $BB.
2707  BuildMI(BB, DL, TII->get(BranchOp))
2708    .addReg(MI->getOperand(1).getReg())
2709    .addMBB(TBB);
2710
2711  // Fill $FBB.
2712  unsigned RD1 = RegInfo.createVirtualRegister(RC);
2713  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), RD1)
2714    .addReg(Mips::ZERO).addImm(0);
2715  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
2716
2717  // Fill $TBB.
2718  unsigned RD2 = RegInfo.createVirtualRegister(RC);
2719  BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), RD2)
2720    .addReg(Mips::ZERO).addImm(1);
2721
2722  // Insert phi function to $Sink.
2723  BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
2724          MI->getOperand(0).getReg())
2725    .addReg(RD1).addMBB(FBB).addReg(RD2).addMBB(TBB);
2726
2727  MI->eraseFromParent();   // The pseudo instruction is gone now.
2728  return Sink;
2729}
2730
2731// Emit the COPY_FW pseudo instruction.
2732//
2733// copy_fw_pseudo $fd, $ws, n
2734// =>
2735// copy_u_w $rt, $ws, $n
2736// mtc1     $rt, $fd
2737//
2738// When n is zero, the equivalent operation can be performed with (potentially)
2739// zero instructions due to register overlaps. This optimization is never valid
2740// for lane 1 because it would require FR=0 mode which isn't supported by MSA.
2741MachineBasicBlock *MipsSETargetLowering::
2742emitCOPY_FW(MachineInstr *MI, MachineBasicBlock *BB) const {
2743  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2744  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2745  DebugLoc DL = MI->getDebugLoc();
2746  unsigned Fd = MI->getOperand(0).getReg();
2747  unsigned Ws = MI->getOperand(1).getReg();
2748  unsigned Lane = MI->getOperand(2).getImm();
2749
2750  if (Lane == 0)
2751    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_lo);
2752  else {
2753    unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
2754
2755    BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(Lane);
2756    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
2757  }
2758
2759  MI->eraseFromParent();   // The pseudo instruction is gone now.
2760  return BB;
2761}
2762
2763// Emit the COPY_FD pseudo instruction.
2764//
2765// copy_fd_pseudo $fd, $ws, n
2766// =>
2767// splati.d $wt, $ws, $n
2768// copy $fd, $wt:sub_64
2769//
2770// When n is zero, the equivalent operation can be performed with (potentially)
2771// zero instructions due to register overlaps. This optimization is always
2772// valid because FR=1 mode is the only mode supported by MSA.
2773MachineBasicBlock *MipsSETargetLowering::
2774emitCOPY_FD(MachineInstr *MI, MachineBasicBlock *BB) const {
2775  assert(Subtarget->isFP64bit());
2776
2777  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
2778  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
2779  unsigned Fd  = MI->getOperand(0).getReg();
2780  unsigned Ws  = MI->getOperand(1).getReg();
2781  unsigned Lane = MI->getOperand(2).getImm() * 2;
2782  DebugLoc DL = MI->getDebugLoc();
2783
2784  if (Lane == 0)
2785    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_64);
2786  else {
2787    unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
2788
2789    BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wt).addReg(Ws).addImm(1);
2790    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_64);
2791  }
2792
2793  MI->eraseFromParent();   // The pseudo instruction is gone now.
2794  return BB;
2795}
2796
// Emit the INSERT_FW pseudo instruction.
//
// insert_fw_pseudo $wd, $wd_in, $n, $fs
// =>
// subreg_to_reg $wt:sub_lo, $fs
// insve_w $wd[$n], $wd_in, $wt[0]
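//
// For example (register numbers are placeholders):
//   insert_fw_pseudo $w0, $w1, 2, $f4
//   =>
//   subreg_to_reg $w2:sub_lo, $f4
//   insve_w $w0[2], $w1, $w2[0]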
MachineBasicBlock *
MipsSETargetLowering::emitINSERT_FW(MachineInstr *MI,
                                    MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Wd = MI->getOperand(0).getReg();
  unsigned Wd_in = MI->getOperand(1).getReg();
  unsigned Lane = MI->getOperand(2).getImm();
  unsigned Fs = MI->getOperand(3).getReg();
  unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);

  BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
      .addImm(0)
      .addReg(Fs)
      .addImm(Mips::sub_lo);
  BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_W), Wd)
      .addReg(Wd_in)
      .addImm(Lane)
      .addReg(Wt);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

// Emit the INSERT_FD pseudo instruction.
//
// insert_fd_pseudo $wd, $wd_in, $n, $fs
// =>
// subreg_to_reg $wt:sub_64, $fs
// insve_d $wd[$n], $wd_in, $wt[0]
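//
// For example (register numbers are placeholders):
//   insert_fd_pseudo $w0, $w1, 1, $f4
//   =>
//   subreg_to_reg $w2:sub_64, $f4
//   insve_d $w0[1], $w1, $w2[0]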
MachineBasicBlock *
MipsSETargetLowering::emitINSERT_FD(MachineInstr *MI,
                                    MachineBasicBlock *BB) const {
  assert(Subtarget->isFP64bit());

  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Wd = MI->getOperand(0).getReg();
  unsigned Wd_in = MI->getOperand(1).getReg();
  unsigned Lane = MI->getOperand(2).getImm();
  unsigned Fs = MI->getOperand(3).getReg();
  unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);

  BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
      .addImm(0)
      .addReg(Fs)
      .addImm(Mips::sub_64);
  BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_D), Wd)
      .addReg(Wd_in)
      .addImm(Lane)
      .addReg(Wt);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

// Emit the FILL_FW pseudo instruction.
//
// fill_fw_pseudo $wd, $fs
// =>
// implicit_def $wt1
// insert_subreg $wt2:sub_lo, $wt1, $fs
// splati.w $wd, $wt2[0]
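//
// The net effect is to broadcast the single-precision value in $fs into every
// lane of $wd. For example (register numbers are placeholders):
//   fill_fw_pseudo $w0, $f2
//   =>
//   implicit_def $w1
//   insert_subreg $w3:sub_lo, $w1, $f2
//   splati.w $w0, $w3[0]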
MachineBasicBlock *
MipsSETargetLowering::emitFILL_FW(MachineInstr *MI,
                                  MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Wd = MI->getOperand(0).getReg();
  unsigned Fs = MI->getOperand(1).getReg();
  unsigned Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
  unsigned Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);

  BuildMI(*BB, MI, DL, TII->get(Mips::IMPLICIT_DEF), Wt1);
  BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_SUBREG), Wt2)
      .addReg(Wt1)
      .addReg(Fs)
      .addImm(Mips::sub_lo);
  BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wd).addReg(Wt2).addImm(0);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

// Emit the FILL_FD pseudo instruction.
//
// fill_fd_pseudo $wd, $fs
// =>
// implicit_def $wt1
// insert_subreg $wt2:sub_64, $wt1, $fs
// splati.d $wd, $wt2[0]
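//
// The net effect is to broadcast the double-precision value in $fs into both
// lanes of $wd. For example (register numbers are placeholders):
//   fill_fd_pseudo $w0, $f2
//   =>
//   implicit_def $w1
//   insert_subreg $w3:sub_64, $w1, $f2
//   splati.d $w0, $w3[0]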
MachineBasicBlock *
MipsSETargetLowering::emitFILL_FD(MachineInstr *MI,
                                  MachineBasicBlock *BB) const {
  assert(Subtarget->isFP64bit());

  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Wd = MI->getOperand(0).getReg();
  unsigned Fs = MI->getOperand(1).getReg();
  unsigned Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
  unsigned Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);

  BuildMI(*BB, MI, DL, TII->get(Mips::IMPLICIT_DEF), Wt1);
  BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_SUBREG), Wt2)
      .addReg(Wt1)
      .addReg(Fs)
      .addImm(Mips::sub_64);
  BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wd).addReg(Wt2).addImm(0);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

// Emit the FEXP2_W_1 pseudo instruction.
//
// fexp2_w_1_pseudo $wd, $wt
// =>
// ldi.w      $ws1, 1
// ffint_u.w  $ws2, $ws1
// fexp2.w    $wd, $ws2, $wt
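//
// The splatted integer 1 is converted to 1.0 so that each lane of $wd becomes
// 1.0 * fexp2 of the corresponding lane of $wt. For example (register numbers
// are placeholders):
//   fexp2_w_1_pseudo $w0, $w1
//   =>
//   ldi.w      $w2, 1
//   ffint_u.w  $w3, $w2
//   fexp2.w    $w0, $w3, $w1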
MachineBasicBlock *
MipsSETargetLowering::emitFEXP2_W_1(MachineInstr *MI,
                                    MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  const TargetRegisterClass *RC = &Mips::MSA128WRegClass;
  unsigned Ws1 = RegInfo.createVirtualRegister(RC);
  unsigned Ws2 = RegInfo.createVirtualRegister(RC);
  DebugLoc DL = MI->getDebugLoc();

  // Splat 1.0 into a vector
  BuildMI(*BB, MI, DL, TII->get(Mips::LDI_W), Ws1).addImm(1);
  BuildMI(*BB, MI, DL, TII->get(Mips::FFINT_U_W), Ws2).addReg(Ws1);

  // Emit 1.0 * fexp2(Wt)
  BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_W), MI->getOperand(0).getReg())
      .addReg(Ws2)
      .addReg(MI->getOperand(1).getReg());

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

// Emit the FEXP2_D_1 pseudo instruction.
//
// fexp2_d_1_pseudo $wd, $wt
// =>
// ldi.d      $ws1, 1
// ffint_u.d  $ws2, $ws1
// fexp2.d    $wd, $ws2, $wt
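//
// As with the single-precision variant, the splatted integer 1 is converted to
// 1.0 before being used. For example (register numbers are placeholders):
//   fexp2_d_1_pseudo $w0, $w1
//   =>
//   ldi.d      $w2, 1
//   ffint_u.d  $w3, $w2
//   fexp2.d    $w0, $w3, $w1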
MachineBasicBlock *
MipsSETargetLowering::emitFEXP2_D_1(MachineInstr *MI,
                                    MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  const TargetRegisterClass *RC = &Mips::MSA128DRegClass;
  unsigned Ws1 = RegInfo.createVirtualRegister(RC);
  unsigned Ws2 = RegInfo.createVirtualRegister(RC);
  DebugLoc DL = MI->getDebugLoc();

  // Splat 1.0 into a vector
  BuildMI(*BB, MI, DL, TII->get(Mips::LDI_D), Ws1).addImm(1);
  BuildMI(*BB, MI, DL, TII->get(Mips::FFINT_U_D), Ws2).addReg(Ws1);

  // Emit 1.0 * fexp2(Wt)
  BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_D), MI->getOperand(0).getReg())
      .addReg(Ws2)
      .addReg(MI->getOperand(1).getReg());

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
