MipsSEISelLowering.cpp revision b6ed641c719e3f370b0e9120823b349993c3494b
//===-- MipsSEISelLowering.cpp - MipsSE DAG Lowering Interface --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Subclass of MipsTargetLowering specialized for mips32/64.
//
//===----------------------------------------------------------------------===//
#include "MipsSEISelLowering.h"
#include "MipsRegisterInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

static cl::opt<bool>
EnableMipsTailCalls("enable-mips-tail-calls", cl::Hidden,
                    cl::desc("MIPS: Enable tail calls."), cl::init(false));

static cl::opt<bool> NoDPLoadStore("mno-ldc1-sdc1", cl::init(false),
                                   cl::desc("Expand double precision loads and "
                                            "stores to their single precision "
                                            "counterparts"));

MipsSETargetLowering::MipsSETargetLowering(MipsTargetMachine &TM)
  : MipsTargetLowering(TM) {
  // Set up the register classes
  addRegisterClass(MVT::i32, &Mips::GPR32RegClass);

  if (HasMips64)
    addRegisterClass(MVT::i64, &Mips::GPR64RegClass);

  if (Subtarget->hasDSP() || Subtarget->hasMSA()) {
    // Expand all truncating stores and extending loads.
    unsigned FirstVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
    unsigned LastVT = (unsigned)MVT::LAST_VECTOR_VALUETYPE;

    for (unsigned VT0 = FirstVT; VT0 <= LastVT; ++VT0) {
      for (unsigned VT1 = FirstVT; VT1 <= LastVT; ++VT1)
        setTruncStoreAction((MVT::SimpleValueType)VT0,
                            (MVT::SimpleValueType)VT1, Expand);

      setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT0, Expand);
      setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT0, Expand);
    }
  }

  if (Subtarget->hasDSP()) {
    MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};

    for (unsigned i = 0; i < array_lengthof(VecTys); ++i) {
      addRegisterClass(VecTys[i], &Mips::DSPRRegClass);

      // Expand all builtin opcodes.
      for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
        setOperationAction(Opc, VecTys[i], Expand);

      setOperationAction(ISD::ADD, VecTys[i], Legal);
      setOperationAction(ISD::SUB, VecTys[i], Legal);
      setOperationAction(ISD::LOAD, VecTys[i], Legal);
      setOperationAction(ISD::STORE, VecTys[i], Legal);
      setOperationAction(ISD::BITCAST, VecTys[i], Legal);
    }

    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::VSELECT);
  }

  if (Subtarget->hasDSPR2())
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);

  if (Subtarget->hasMSA()) {
    addMSAIntType(MVT::v16i8, &Mips::MSA128BRegClass);
    addMSAIntType(MVT::v8i16, &Mips::MSA128HRegClass);
    addMSAIntType(MVT::v4i32, &Mips::MSA128WRegClass);
    addMSAIntType(MVT::v2i64, &Mips::MSA128DRegClass);
    addMSAFloatType(MVT::v8f16, &Mips::MSA128HRegClass);
    addMSAFloatType(MVT::v4f32, &Mips::MSA128WRegClass);
    addMSAFloatType(MVT::v2f64, &Mips::MSA128DRegClass);

    setTargetDAGCombine(ISD::AND);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::VSELECT);
    setTargetDAGCombine(ISD::XOR);
  }

  if (!Subtarget->mipsSEUsesSoftFloat()) {
    addRegisterClass(MVT::f32, &Mips::FGR32RegClass);

    // When dealing with single precision only, use libcalls
    if (!Subtarget->isSingleFloat()) {
      if (Subtarget->isFP64bit())
        addRegisterClass(MVT::f64, &Mips::FGR64RegClass);
      else
        addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);
    }
  }

  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Custom);
  setOperationAction(ISD::MULHU, MVT::i32, Custom);

  if (HasMips64) {
    setOperationAction(ISD::MULHS, MVT::i64, Custom);
    setOperationAction(ISD::MULHU, MVT::i64, Custom);
    setOperationAction(ISD::MUL, MVT::i64, Custom);
  }

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);

  setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  setTargetDAGCombine(ISD::ADDE);
  setTargetDAGCombine(ISD::SUBE);
  setTargetDAGCombine(ISD::MUL);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  if (NoDPLoadStore) {
    setOperationAction(ISD::LOAD, MVT::f64, Custom);
    setOperationAction(ISD::STORE, MVT::f64, Custom);
  }

  computeRegisterProperties();
}

const MipsTargetLowering *
llvm::createMipsSETargetLowering(MipsTargetMachine &TM) {
  return new MipsSETargetLowering(TM);
}

// Enable MSA support for the given integer type and Register class.
void MipsSETargetLowering::
addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
  addRegisterClass(Ty, RC);

  // Expand all builtin opcodes.
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, Ty, Expand);

  setOperationAction(ISD::BITCAST, Ty, Legal);
  setOperationAction(ISD::LOAD, Ty, Legal);
  setOperationAction(ISD::STORE, Ty, Legal);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
  setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);

  setOperationAction(ISD::ADD, Ty, Legal);
  setOperationAction(ISD::AND, Ty, Legal);
  setOperationAction(ISD::CTLZ, Ty, Legal);
  setOperationAction(ISD::CTPOP, Ty, Legal);
  setOperationAction(ISD::MUL, Ty, Legal);
  setOperationAction(ISD::OR, Ty, Legal);
  setOperationAction(ISD::SDIV, Ty, Legal);
  setOperationAction(ISD::SREM, Ty, Legal);
  setOperationAction(ISD::SHL, Ty, Legal);
  setOperationAction(ISD::SRA, Ty, Legal);
  setOperationAction(ISD::SRL, Ty, Legal);
  setOperationAction(ISD::SUB, Ty, Legal);
  setOperationAction(ISD::UDIV, Ty, Legal);
  setOperationAction(ISD::UREM, Ty, Legal);
  setOperationAction(ISD::VECTOR_SHUFFLE, Ty, Custom);
  setOperationAction(ISD::VSELECT, Ty, Legal);
  setOperationAction(ISD::XOR, Ty, Legal);

  if (Ty == MVT::v4i32 || Ty == MVT::v2i64) {
    setOperationAction(ISD::FP_TO_SINT, Ty, Legal);
    setOperationAction(ISD::FP_TO_UINT, Ty, Legal);
    setOperationAction(ISD::SINT_TO_FP, Ty, Legal);
    setOperationAction(ISD::UINT_TO_FP, Ty, Legal);
  }

  setOperationAction(ISD::SETCC, Ty, Legal);
  setCondCodeAction(ISD::SETNE, Ty, Expand);
  setCondCodeAction(ISD::SETGE, Ty, Expand);
  setCondCodeAction(ISD::SETGT, Ty, Expand);
  setCondCodeAction(ISD::SETUGE, Ty, Expand);
  setCondCodeAction(ISD::SETUGT, Ty, Expand);
}

// Enable MSA support for the given floating-point type and Register class.
void MipsSETargetLowering::
addMSAFloatType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
  addRegisterClass(Ty, RC);

  // Expand all builtin opcodes.
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, Ty, Expand);

  setOperationAction(ISD::LOAD, Ty, Legal);
  setOperationAction(ISD::STORE, Ty, Legal);
  setOperationAction(ISD::BITCAST, Ty, Legal);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Legal);
  setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);

  if (Ty != MVT::v8f16) {
    setOperationAction(ISD::FABS, Ty, Legal);
    setOperationAction(ISD::FADD, Ty, Legal);
    setOperationAction(ISD::FDIV, Ty, Legal);
    setOperationAction(ISD::FLOG2, Ty, Legal);
    setOperationAction(ISD::FMA, Ty, Legal);
    setOperationAction(ISD::FMUL, Ty, Legal);
    setOperationAction(ISD::FRINT, Ty, Legal);
    setOperationAction(ISD::FSQRT, Ty, Legal);
    setOperationAction(ISD::FSUB, Ty, Legal);
    setOperationAction(ISD::VSELECT, Ty, Legal);

    setOperationAction(ISD::SETCC, Ty, Legal);
    setCondCodeAction(ISD::SETOGE, Ty, Expand);
    setCondCodeAction(ISD::SETOGT, Ty, Expand);
    setCondCodeAction(ISD::SETUGE, Ty, Expand);
    setCondCodeAction(ISD::SETUGT, Ty, Expand);
    setCondCodeAction(ISD::SETGE, Ty, Expand);
    setCondCodeAction(ISD::SETGT, Ty, Expand);
  }
}

bool
MipsSETargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
  MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;

  switch (SVT) {
  case MVT::i64:
  case MVT::i32:
    if (Fast)
      *Fast = true;
    return true;
  default:
    return false;
  }
}

SDValue MipsSETargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch(Op.getOpcode()) {
  case ISD::LOAD:      return lowerLOAD(Op, DAG);
  case ISD::STORE:     return lowerSTORE(Op, DAG);
  case ISD::SMUL_LOHI: return lowerMulDiv(Op, MipsISD::Mult, true, true, DAG);
  case ISD::UMUL_LOHI: return lowerMulDiv(Op, MipsISD::Multu, true, true, DAG);
  case ISD::MULHS:     return lowerMulDiv(Op, MipsISD::Mult, false, true, DAG);
  case ISD::MULHU:     return lowerMulDiv(Op, MipsISD::Multu, false, true, DAG);
  case ISD::MUL:       return lowerMulDiv(Op, MipsISD::Mult, true, false, DAG);
  case ISD::SDIVREM:   return lowerMulDiv(Op, MipsISD::DivRem, true, true, DAG);
  case ISD::UDIVREM:   return lowerMulDiv(Op, MipsISD::DivRemU, true, true,
                                          DAG);
  case ISD::INTRINSIC_WO_CHAIN: return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:  return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:     return lowerINTRINSIC_VOID(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::BUILD_VECTOR:       return lowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, DAG);
  }

  return MipsTargetLowering::LowerOperation(Op, DAG);
}

// selectMADD -
// Transforms a subgraph in CurDAG if the following pattern is found:
//  (addc multLo, Lo0), (adde multHi, Hi0),
// where,
//  multHi/Lo: product of multiplication
//  Lo0: initial value of Lo register
//  Hi0: initial value of Hi register
// Return true if pattern matching was successful.
static bool selectMADD(SDNode *ADDENode, SelectionDAG *CurDAG) {
  // ADDENode's second operand must be a flag output of an ADDC node in order
  // for the matching to be successful.
  SDNode *ADDCNode = ADDENode->getOperand(2).getNode();

  if (ADDCNode->getOpcode() != ISD::ADDC)
    return false;

  SDValue MultHi = ADDENode->getOperand(0);
  SDValue MultLo = ADDCNode->getOperand(0);
  SDNode *MultNode = MultHi.getNode();
  unsigned MultOpc = MultHi.getOpcode();

  // MultHi and MultLo must be generated by the same node,
  if (MultLo.getNode() != MultNode)
    return false;

  // and it must be a multiplication.
  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
    return false;

  // MultLo and MultHi must be the first and second output of MultNode
  // respectively.
  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
    return false;

  // Transform this to a MADD only if ADDENode and ADDCNode are the only users
  // of the values of MultNode, in which case MultNode will be removed in later
  // phases.
  // If there exist users other than ADDENode or ADDCNode, this function returns
  // here, which will result in MultNode being mapped to a single MULT
  // instruction node rather than a pair of MULT and MADD instructions being
  // produced.
  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
    return false;

  SDLoc DL(ADDENode);

  // Initialize accumulator.
  SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
                                  ADDCNode->getOperand(1),
                                  ADDENode->getOperand(1));

  // create MipsMAdd(u) node
  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MAddu : MipsISD::MAdd;

  SDValue MAdd = CurDAG->getNode(MultOpc, DL, MVT::Untyped,
                                 MultNode->getOperand(0),// Factor 0
                                 MultNode->getOperand(1),// Factor 1
                                 ACCIn);

  // replace uses of adde and addc here
  if (!SDValue(ADDCNode, 0).use_empty()) {
    SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLO, DL, MVT::i32, MAdd);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), LoOut);
  }
  if (!SDValue(ADDENode, 0).use_empty()) {
    SDValue HiOut = CurDAG->getNode(MipsISD::ExtractHI, DL, MVT::i32, MAdd);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), HiOut);
  }

  return true;
}

// selectMSUB -
// Transforms a subgraph in CurDAG if the following pattern is found:
//  (subc Lo0, multLo), (sube Hi0, multHi),
// where,
//  multHi/Lo: product of multiplication
//  Lo0: initial value of Lo register
//  Hi0: initial value of Hi register
// Return true if pattern matching was successful.
static bool selectMSUB(SDNode *SUBENode, SelectionDAG *CurDAG) {
  // SUBENode's second operand must be a flag output of an SUBC node in order
  // for the matching to be successful.
  SDNode *SUBCNode = SUBENode->getOperand(2).getNode();

  if (SUBCNode->getOpcode() != ISD::SUBC)
    return false;

  SDValue MultHi = SUBENode->getOperand(1);
  SDValue MultLo = SUBCNode->getOperand(1);
  SDNode *MultNode = MultHi.getNode();
  unsigned MultOpc = MultHi.getOpcode();

  // MultHi and MultLo must be generated by the same node,
  if (MultLo.getNode() != MultNode)
    return false;

  // and it must be a multiplication.
  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
    return false;

  // MultLo and MultHi must be the first and second output of MultNode
  // respectively.
  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
    return false;

  // Transform this to a MSUB only if SUBENode and SUBCNode are the only users
  // of the values of MultNode, in which case MultNode will be removed in later
  // phases.
  // If there exist users other than SUBENode or SUBCNode, this function returns
  // here, which will result in MultNode being mapped to a single MULT
  // instruction node rather than a pair of MULT and MSUB instructions being
  // produced.
  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
    return false;

  SDLoc DL(SUBENode);

  // Initialize accumulator.
  SDValue ACCIn = CurDAG->getNode(MipsISD::InsertLOHI, DL, MVT::Untyped,
                                  SUBCNode->getOperand(0),
                                  SUBENode->getOperand(0));

  // create MipsSub(u) node
  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MSubu : MipsISD::MSub;

  SDValue MSub = CurDAG->getNode(MultOpc, DL, MVT::Glue,
                                 MultNode->getOperand(0),// Factor 0
                                 MultNode->getOperand(1),// Factor 1
                                 ACCIn);

  // replace uses of sube and subc here
  if (!SDValue(SUBCNode, 0).use_empty()) {
    SDValue LoOut = CurDAG->getNode(MipsISD::ExtractLO, DL, MVT::i32, MSub);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), LoOut);
  }
  if (!SDValue(SUBENode, 0).use_empty()) {
    SDValue HiOut = CurDAG->getNode(MipsISD::ExtractHI, DL, MVT::i32, MSub);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), HiOut);
  }

  return true;
}

static SDValue performADDECombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const MipsSubtarget *Subtarget) {
  if (DCI.isBeforeLegalize())
    return SDValue();

  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
      selectMADD(N, &DAG))
    return SDValue(N, 0);

  return SDValue();
}

// Fold zero extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT
//
// Performs the following transformations:
// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to zero extension if its
//   sign/zero-extension is completely overwritten by the new one performed by
//   the ISD::AND.
// - Removes redundant zero extensions performed by an ISD::AND.
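//
// For instance (illustrative example, operand names not taken from the source):
//   (and (MipsVExtractSExt $w0, $idx, i16), 65535)
// has a mask where $d + 1 == 2^16, matching the i16 extension width exactly,
// so the extract below is morphed into
//   (MipsVExtractZExt $w0, $idx, i16)
// and the redundant AND is dropped.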
static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget *Subtarget) {
  if (!Subtarget->hasMSA())
    return SDValue();

  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  unsigned Op0Opcode = Op0->getOpcode();

  // (and (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d)
  // where $d + 1 == 2^n and n == 32
  // or    $d + 1 == 2^n and n <= 32 and ZExt
  // -> (MipsVExtractZExt $a, $b, $c)
  if (Op0Opcode == MipsISD::VEXTRACT_SEXT_ELT ||
      Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT) {
    ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(Op1);

    if (!Mask)
      return SDValue();

    int32_t Log2IfPositive = (Mask->getAPIntValue() + 1).exactLogBase2();

    if (Log2IfPositive <= 0)
      return SDValue(); // Mask+1 is not a power of 2

    SDValue Op0Op2 = Op0->getOperand(2);
    EVT ExtendTy = cast<VTSDNode>(Op0Op2)->getVT();
    unsigned ExtendTySize = ExtendTy.getSizeInBits();
    unsigned Log2 = Log2IfPositive;

    if ((Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT && Log2 >= ExtendTySize) ||
        Log2 == ExtendTySize) {
      SDValue Ops[] = { Op0->getOperand(0), Op0->getOperand(1), Op0Op2 };
      DAG.MorphNodeTo(Op0.getNode(), MipsISD::VEXTRACT_ZEXT_ELT,
                      Op0->getVTList(), Ops, Op0->getNumOperands());
      return Op0;
    }
  }

  return SDValue();
}

static SDValue performSUBECombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const MipsSubtarget *Subtarget) {
  if (DCI.isBeforeLegalize())
    return SDValue();

  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
      selectMSUB(N, &DAG))
    return SDValue(N, 0);

  return SDValue();
}

static SDValue genConstMult(SDValue X, uint64_t C, SDLoc DL, EVT VT,
                            EVT ShiftTy, SelectionDAG &DAG) {
  // Clear the upper (64 - VT.sizeInBits) bits.
  C &= ((uint64_t)-1) >> (64 - VT.getSizeInBits());

  // Return 0.
  if (C == 0)
    return DAG.getConstant(0, VT);

  // Return x.
  if (C == 1)
    return X;

  // If c is power of 2, return (shl x, log2(c)).
  if (isPowerOf2_64(C))
    return DAG.getNode(ISD::SHL, DL, VT, X,
                       DAG.getConstant(Log2_64(C), ShiftTy));

  unsigned Log2Ceil = Log2_64_Ceil(C);
  uint64_t Floor = 1LL << Log2_64(C);
  uint64_t Ceil = Log2Ceil == 64 ? 0LL : 1LL << Log2Ceil;

  // If |c - floor_c| <= |c - ceil_c|,
  // where floor_c = pow(2, floor(log2(c))) and ceil_c = pow(2, ceil(log2(c))),
  // return (add constMult(x, floor_c), constMult(x, c - floor_c)).
  if (C - Floor <= Ceil - C) {
    SDValue Op0 = genConstMult(X, Floor, DL, VT, ShiftTy, DAG);
    SDValue Op1 = genConstMult(X, C - Floor, DL, VT, ShiftTy, DAG);
    return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
  }

  // If |c - floor_c| > |c - ceil_c|,
  // return (sub constMult(x, ceil_c), constMult(x, ceil_c - c)).
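  //
  // Worked example (illustrative, not part of the original comment): for
  // c = 13, floor_c = 8 and ceil_c = 16; since 13 - 8 > 16 - 13 this branch
  // emits (sub (shl x, 4), constMult(x, 3)), and the recursive call turns
  // the remaining factor 3 into (add (shl x, 1), x).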
  SDValue Op0 = genConstMult(X, Ceil, DL, VT, ShiftTy, DAG);
  SDValue Op1 = genConstMult(X, Ceil - C, DL, VT, ShiftTy, DAG);
  return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
}

static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG,
                                 const TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSETargetLowering *TL) {
  EVT VT = N->getValueType(0);

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    if (!VT.isVector())
      return genConstMult(N->getOperand(0), C->getZExtValue(), SDLoc(N),
                          VT, TL->getScalarShiftAmountTy(VT), DAG);

  return SDValue(N, 0);
}

static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty,
                                      SelectionDAG &DAG,
                                      const MipsSubtarget *Subtarget) {
  // See if this is a vector splat immediate node.
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  unsigned EltSize = Ty.getVectorElementType().getSizeInBits();
  BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1));

  if (!BV ||
      !BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
                           EltSize, !Subtarget->isLittle()) ||
      (SplatBitSize != EltSize) ||
      (SplatValue.getZExtValue() >= EltSize))
    return SDValue();

  return DAG.getNode(Opc, SDLoc(N), Ty, N->getOperand(0),
                     DAG.getConstant(SplatValue.getZExtValue(), MVT::i32));
}

static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget *Subtarget) {
  EVT Ty = N->getValueType(0);

  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
    return SDValue();

  return performDSPShiftCombine(MipsISD::SHLL_DSP, N, Ty, DAG, Subtarget);
}

// Fold sign-extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT for MSA and fold
// constant splats into MipsISD::SHRA_DSP for DSPr2.
//
// Performs the following transformations:
// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to sign extension if its
//   sign/zero-extension is completely overwritten by the new one performed by
//   the ISD::SRA and ISD::SHL nodes.
// - Removes redundant sign extensions performed by an ISD::SRA and ISD::SHL
//   sequence.
//
// See performDSPShiftCombine for more information about the transformation
// used for DSPr2.
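//
// For instance (illustrative example, operand names not taken from the source):
//   (sra (shl (MipsVExtractSExt $w0, $idx, i16), 16), 16)
// re-performs the sign-extension the i16 extract already did
// ($d + sizeof($c) == 16 + 16 == 32), so the shl/sra pair folds away and only
//   (MipsVExtractSExt $w0, $idx, i16)
// remains.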
static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget *Subtarget) {
  EVT Ty = N->getValueType(0);

  if (Subtarget->hasMSA()) {
    SDValue Op0 = N->getOperand(0);
    SDValue Op1 = N->getOperand(1);

    // (sra (shl (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d), imm:$d)
    // where $d + sizeof($c) == 32
    // or    $d + sizeof($c) <= 32 and SExt
    // -> (MipsVExtractSExt $a, $b, $c)
    if (Op0->getOpcode() == ISD::SHL && Op1 == Op0->getOperand(1)) {
      SDValue Op0Op0 = Op0->getOperand(0);
      ConstantSDNode *ShAmount = dyn_cast<ConstantSDNode>(Op1);

      if (!ShAmount)
        return SDValue();

      if (Op0Op0->getOpcode() != MipsISD::VEXTRACT_SEXT_ELT &&
          Op0Op0->getOpcode() != MipsISD::VEXTRACT_ZEXT_ELT)
        return SDValue();

      EVT ExtendTy = cast<VTSDNode>(Op0Op0->getOperand(2))->getVT();
      unsigned TotalBits = ShAmount->getZExtValue() + ExtendTy.getSizeInBits();

      if (TotalBits == 32 ||
          (Op0Op0->getOpcode() == MipsISD::VEXTRACT_SEXT_ELT &&
           TotalBits <= 32)) {
        SDValue Ops[] = { Op0Op0->getOperand(0), Op0Op0->getOperand(1),
                          Op0Op0->getOperand(2) };
        DAG.MorphNodeTo(Op0Op0.getNode(), MipsISD::VEXTRACT_SEXT_ELT,
                        Op0Op0->getVTList(), Ops, Op0Op0->getNumOperands());
        return Op0Op0;
      }
    }
  }

  if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget->hasDSPR2()))
    return SDValue();

  return performDSPShiftCombine(MipsISD::SHRA_DSP, N, Ty, DAG, Subtarget);
}


static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget *Subtarget) {
  EVT Ty = N->getValueType(0);

  if (((Ty != MVT::v2i16) || !Subtarget->hasDSPR2()) && (Ty != MVT::v4i8))
    return SDValue();

  return performDSPShiftCombine(MipsISD::SHRL_DSP, N, Ty, DAG, Subtarget);
}

static bool isLegalDSPCondCode(EVT Ty, ISD::CondCode CC) {
  bool IsV216 = (Ty == MVT::v2i16);

  switch (CC) {
  case ISD::SETEQ:
  case ISD::SETNE:  return true;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE:  return IsV216;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return !IsV216;
  default:          return false;
  }
}

static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) {
  EVT Ty = N->getValueType(0);

  if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8))
    return SDValue();

  if (!isLegalDSPCondCode(Ty, cast<CondCodeSDNode>(N->getOperand(2))->get()))
    return SDValue();

  return DAG.getNode(MipsISD::SETCC_DSP, SDLoc(N), Ty, N->getOperand(0),
                     N->getOperand(1), N->getOperand(2));
}

static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) {
  EVT Ty = N->getValueType(0);

  if (Ty.is128BitVector() && Ty.isInteger()) {
    // Try the following combines:
    //   (vselect (setcc $a, $b, SETLT), $b, $a)) -> (vsmax $a, $b)
    //   (vselect (setcc $a, $b, SETLE), $b, $a)) -> (vsmax $a, $b)
    //   (vselect (setcc $a, $b, SETLT), $a, $b)) -> (vsmin $a, $b)
    //   (vselect (setcc $a, $b, SETLE), $a, $b)) -> (vsmin $a, $b)
    //   (vselect (setcc $a, $b, SETULT), $b, $a)) -> (vumax $a, $b)
    //   (vselect (setcc $a, $b, SETULE), $b, $a)) -> (vumax $a, $b)
    //   (vselect (setcc $a, $b, SETULT), $a, $b)) -> (vumin $a, $b)
    //   (vselect (setcc $a, $b, SETULE), $a, $b)) -> (vumin $a, $b)
    // SETGT/SETGE/SETUGT/SETUGE variants of these will show up initially but
    // will be expanded to equivalent SETLT/SETLE/SETULT/SETULE versions by the
    // legalizer.
    SDValue Op0 = N->getOperand(0);

    if (Op0->getOpcode() != ISD::SETCC)
      return SDValue();

    ISD::CondCode CondCode = cast<CondCodeSDNode>(Op0->getOperand(2))->get();
    bool Signed;

    if (CondCode == ISD::SETLT || CondCode == ISD::SETLE)
      Signed = true;
    else if (CondCode == ISD::SETULT || CondCode == ISD::SETULE)
      Signed = false;
    else
      return SDValue();

    SDValue Op1 = N->getOperand(1);
    SDValue Op2 = N->getOperand(2);
    SDValue Op0Op0 = Op0->getOperand(0);
    SDValue Op0Op1 = Op0->getOperand(1);

    if (Op1 == Op0Op0 && Op2 == Op0Op1)
      return DAG.getNode(Signed ? MipsISD::VSMIN : MipsISD::VUMIN, SDLoc(N),
                         Ty, Op1, Op2);
    else if (Op1 == Op0Op1 && Op2 == Op0Op0)
      return DAG.getNode(Signed ? MipsISD::VSMAX : MipsISD::VUMAX, SDLoc(N),
                         Ty, Op1, Op2);
  } else if ((Ty == MVT::v2i16) || (Ty == MVT::v4i8)) {
    SDValue SetCC = N->getOperand(0);

    if (SetCC.getOpcode() != MipsISD::SETCC_DSP)
      return SDValue();

    return DAG.getNode(MipsISD::SELECT_CC_DSP, SDLoc(N), Ty,
                       SetCC.getOperand(0), SetCC.getOperand(1),
                       N->getOperand(1), N->getOperand(2), SetCC.getOperand(2));
  }

  return SDValue();
}

static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
                                 const MipsSubtarget *Subtarget) {
  EVT Ty = N->getValueType(0);

  if (Subtarget->hasMSA() && Ty.is128BitVector() && Ty.isInteger()) {
    // Try the following combines:
    //   (xor (or $a, $b), (build_vector allones))
    //   (xor (or $a, $b), (bitcast (build_vector allones)))
    SDValue Op0 = N->getOperand(0);
    SDValue Op1 = N->getOperand(1);
    SDValue NotOp;

    if (ISD::isBuildVectorAllOnes(Op0.getNode()))
      NotOp = Op1;
    else if (ISD::isBuildVectorAllOnes(Op1.getNode()))
      NotOp = Op0;
    else
      return SDValue();

    if (NotOp->getOpcode() == ISD::OR)
      return DAG.getNode(MipsISD::VNOR, SDLoc(N), Ty, NotOp->getOperand(0),
                         NotOp->getOperand(1));
  }

  return SDValue();
}

SDValue
MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Val;

  switch (N->getOpcode()) {
  case ISD::ADDE:
    return performADDECombine(N, DAG, DCI, Subtarget);
  case ISD::AND:
    Val = performANDCombine(N, DAG, DCI, Subtarget);
    break;
  case ISD::SUBE:
    return performSUBECombine(N, DAG, DCI, Subtarget);
  case ISD::MUL:
    return performMULCombine(N, DAG, DCI, this);
  case ISD::SHL:
    return performSHLCombine(N, DAG, DCI, Subtarget);
  case ISD::SRA:
    return performSRACombine(N, DAG, DCI, Subtarget);
  case ISD::SRL:
    return performSRLCombine(N, DAG, DCI, Subtarget);
  case ISD::VSELECT:
    return performVSELECTCombine(N, DAG);
  case ISD::XOR:
    Val = performXORCombine(N, DAG, Subtarget);
    break;
  case ISD::SETCC:
    Val = performSETCCCombine(N, DAG);
    break;
  }

  if (Val.getNode())
    return Val;

  return MipsTargetLowering::PerformDAGCombine(N, DCI);
}

MachineBasicBlock *
MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                  MachineBasicBlock *BB) const {
  switch (MI->getOpcode()) {
  default:
    return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  case Mips::BPOSGE32_PSEUDO:
    return emitBPOSGE32(MI, BB);
  case Mips::SNZ_B_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_B);
  case Mips::SNZ_H_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_H);
  case Mips::SNZ_W_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_W);
  case Mips::SNZ_D_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_D);
  case Mips::SNZ_V_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_V);
  case Mips::SZ_B_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_B);
  case Mips::SZ_H_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_H);
  case Mips::SZ_W_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_W);
  case Mips::SZ_D_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_D);
  case Mips::SZ_V_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_V);
  case Mips::COPY_FW_PSEUDO:
    return emitCOPY_FW(MI, BB);
  case Mips::COPY_FD_PSEUDO:
    return emitCOPY_FD(MI, BB);
  case Mips::INSERT_FW_PSEUDO:
    return emitINSERT_FW(MI, BB);
  case Mips::INSERT_FD_PSEUDO:
    return emitINSERT_FD(MI, BB);
  }
}

bool MipsSETargetLowering::
isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
                                  unsigned NextStackOffset,
                                  const MipsFunctionInfo& FI) const {
  if (!EnableMipsTailCalls)
    return false;

  // Return false if either the callee or caller has a byval argument.
  if (MipsCCInfo.hasByValArg() || FI.hasByvalArg())
    return false;

  // Return true if the callee's argument area is no larger than the
  // caller's.
  return NextStackOffset <= FI.getIncomingArgSize();
}

void MipsSETargetLowering::
getOpndList(SmallVectorImpl<SDValue> &Ops,
            std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
            bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
            CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const {
  // T9 should contain the address of the callee function if
  // -relocation-model=pic or it is an indirect call.
  if (IsPICCall || !GlobalOrExternal) {
    unsigned T9Reg = IsN64 ? Mips::T9_64 : Mips::T9;
    RegsToPass.push_front(std::make_pair(T9Reg, Callee));
  } else
    Ops.push_back(Callee);

  MipsTargetLowering::getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal,
                                  InternalLinkage, CLI, Callee, Chain);
}

SDValue MipsSETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode &Nd = *cast<LoadSDNode>(Op);

  if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
    return MipsTargetLowering::lowerLOAD(Op, DAG);

  // Replace a double precision load with two i32 loads and a buildpair64.
  SDLoc DL(Op);
  SDValue Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
  EVT PtrVT = Ptr.getValueType();

  // i32 load from lower address.
  SDValue Lo = DAG.getLoad(MVT::i32, DL, Chain, Ptr,
                           MachinePointerInfo(), Nd.isVolatile(),
                           Nd.isNonTemporal(), Nd.isInvariant(),
                           Nd.getAlignment());

  // i32 load from higher address.
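  // (The second half is read from the base pointer plus 4 bytes; on
  // big-endian targets the Lo/Hi values are swapped further down.)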
892 Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT)); 893 SDValue Hi = DAG.getLoad(MVT::i32, DL, Lo.getValue(1), Ptr, 894 MachinePointerInfo(), Nd.isVolatile(), 895 Nd.isNonTemporal(), Nd.isInvariant(), 896 std::min(Nd.getAlignment(), 4U)); 897 898 if (!Subtarget->isLittle()) 899 std::swap(Lo, Hi); 900 901 SDValue BP = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, Lo, Hi); 902 SDValue Ops[2] = {BP, Hi.getValue(1)}; 903 return DAG.getMergeValues(Ops, 2, DL); 904} 905 906SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const { 907 StoreSDNode &Nd = *cast<StoreSDNode>(Op); 908 909 if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore) 910 return MipsTargetLowering::lowerSTORE(Op, DAG); 911 912 // Replace a double precision store with two extractelement64s and i32 stores. 913 SDLoc DL(Op); 914 SDValue Val = Nd.getValue(), Ptr = Nd.getBasePtr(), Chain = Nd.getChain(); 915 EVT PtrVT = Ptr.getValueType(); 916 SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, 917 Val, DAG.getConstant(0, MVT::i32)); 918 SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, 919 Val, DAG.getConstant(1, MVT::i32)); 920 921 if (!Subtarget->isLittle()) 922 std::swap(Lo, Hi); 923 924 // i32 store to lower address. 925 Chain = DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(), 926 Nd.isVolatile(), Nd.isNonTemporal(), Nd.getAlignment(), 927 Nd.getTBAAInfo()); 928 929 // i32 store to higher address. 930 Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT)); 931 return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(), 932 Nd.isVolatile(), Nd.isNonTemporal(), 933 std::min(Nd.getAlignment(), 4U), Nd.getTBAAInfo()); 934} 935 936SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc, 937 bool HasLo, bool HasHi, 938 SelectionDAG &DAG) const { 939 EVT Ty = Op.getOperand(0).getValueType(); 940 SDLoc DL(Op); 941 SDValue Mult = DAG.getNode(NewOpc, DL, MVT::Untyped, 942 Op.getOperand(0), Op.getOperand(1)); 943 SDValue Lo, Hi; 944 945 if (HasLo) 946 Lo = DAG.getNode(MipsISD::ExtractLO, DL, Ty, Mult); 947 if (HasHi) 948 Hi = DAG.getNode(MipsISD::ExtractHI, DL, Ty, Mult); 949 950 if (!HasLo || !HasHi) 951 return HasLo ? Lo : Hi; 952 953 SDValue Vals[] = { Lo, Hi }; 954 return DAG.getMergeValues(Vals, 2, DL); 955} 956 957 958static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) { 959 SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In, 960 DAG.getConstant(0, MVT::i32)); 961 SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In, 962 DAG.getConstant(1, MVT::i32)); 963 return DAG.getNode(MipsISD::InsertLOHI, DL, MVT::Untyped, InLo, InHi); 964} 965 966static SDValue extractLOHI(SDValue Op, SDLoc DL, SelectionDAG &DAG) { 967 SDValue Lo = DAG.getNode(MipsISD::ExtractLO, DL, MVT::i32, Op); 968 SDValue Hi = DAG.getNode(MipsISD::ExtractHI, DL, MVT::i32, Op); 969 return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi); 970} 971 972// This function expands mips intrinsic nodes which have 64-bit input operands 973// or output values. 
974// 975// out64 = intrinsic-node in64 976// => 977// lo = copy (extract-element (in64, 0)) 978// hi = copy (extract-element (in64, 1)) 979// mips-specific-node 980// v0 = copy lo 981// v1 = copy hi 982// out64 = merge-values (v0, v1) 983// 984static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) { 985 SDLoc DL(Op); 986 bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other; 987 SmallVector<SDValue, 3> Ops; 988 unsigned OpNo = 0; 989 990 // See if Op has a chain input. 991 if (HasChainIn) 992 Ops.push_back(Op->getOperand(OpNo++)); 993 994 // The next operand is the intrinsic opcode. 995 assert(Op->getOperand(OpNo).getOpcode() == ISD::TargetConstant); 996 997 // See if the next operand has type i64. 998 SDValue Opnd = Op->getOperand(++OpNo), In64; 999 1000 if (Opnd.getValueType() == MVT::i64) 1001 In64 = initAccumulator(Opnd, DL, DAG); 1002 else 1003 Ops.push_back(Opnd); 1004 1005 // Push the remaining operands. 1006 for (++OpNo ; OpNo < Op->getNumOperands(); ++OpNo) 1007 Ops.push_back(Op->getOperand(OpNo)); 1008 1009 // Add In64 to the end of the list. 1010 if (In64.getNode()) 1011 Ops.push_back(In64); 1012 1013 // Scan output. 1014 SmallVector<EVT, 2> ResTys; 1015 1016 for (SDNode::value_iterator I = Op->value_begin(), E = Op->value_end(); 1017 I != E; ++I) 1018 ResTys.push_back((*I == MVT::i64) ? MVT::Untyped : *I); 1019 1020 // Create node. 1021 SDValue Val = DAG.getNode(Opc, DL, ResTys, &Ops[0], Ops.size()); 1022 SDValue Out = (ResTys[0] == MVT::Untyped) ? extractLOHI(Val, DL, DAG) : Val; 1023 1024 if (!HasChainIn) 1025 return Out; 1026 1027 assert(Val->getValueType(1) == MVT::Other); 1028 SDValue Vals[] = { Out, SDValue(Val.getNode(), 1) }; 1029 return DAG.getMergeValues(Vals, 2, DL); 1030} 1031 1032// Lower an MSA copy intrinsic into the specified SelectionDAG node 1033static SDValue lowerMSACopyIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) { 1034 SDLoc DL(Op); 1035 SDValue Vec = Op->getOperand(1); 1036 SDValue Idx = Op->getOperand(2); 1037 EVT ResTy = Op->getValueType(0); 1038 EVT EltTy = Vec->getValueType(0).getVectorElementType(); 1039 1040 SDValue Result = DAG.getNode(Opc, DL, ResTy, Vec, Idx, 1041 DAG.getValueType(EltTy)); 1042 1043 return Result; 1044} 1045 1046static SDValue 1047lowerMSASplatImm(SDLoc DL, EVT ResTy, SDValue ImmOp, SelectionDAG &DAG) { 1048 EVT ViaVecTy = ResTy; 1049 SmallVector<SDValue, 16> Ops; 1050 SDValue ImmHiOp; 1051 1052 if (ViaVecTy == MVT::v2i64) { 1053 ImmHiOp = DAG.getNode(ISD::SRA, DL, MVT::i32, ImmOp, 1054 DAG.getConstant(31, MVT::i32)); 1055 for (unsigned i = 0; i < ViaVecTy.getVectorNumElements(); ++i) { 1056 Ops.push_back(ImmHiOp); 1057 Ops.push_back(ImmOp); 1058 } 1059 ViaVecTy = MVT::v4i32; 1060 } else { 1061 for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i) 1062 Ops.push_back(ImmOp); 1063 } 1064 1065 SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, DL, ViaVecTy, &Ops[0], 1066 Ops.size()); 1067 1068 if (ResTy != ViaVecTy) 1069 Result = DAG.getNode(ISD::BITCAST, DL, ResTy, Result); 1070 1071 return Result; 1072} 1073 1074static SDValue 1075lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG) { 1076 return lowerMSASplatImm(SDLoc(Op), Op->getValueType(0), 1077 Op->getOperand(ImmOp), DAG); 1078} 1079 1080SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, 1081 SelectionDAG &DAG) const { 1082 SDLoc DL(Op); 1083 1084 switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) { 1085 default: 1086 return SDValue(); 1087 case Intrinsic::mips_shilo: 1088 return lowerDSPIntr(Op, 
DAG, MipsISD::SHILO); 1089 case Intrinsic::mips_dpau_h_qbl: 1090 return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBL); 1091 case Intrinsic::mips_dpau_h_qbr: 1092 return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBR); 1093 case Intrinsic::mips_dpsu_h_qbl: 1094 return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBL); 1095 case Intrinsic::mips_dpsu_h_qbr: 1096 return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBR); 1097 case Intrinsic::mips_dpa_w_ph: 1098 return lowerDSPIntr(Op, DAG, MipsISD::DPA_W_PH); 1099 case Intrinsic::mips_dps_w_ph: 1100 return lowerDSPIntr(Op, DAG, MipsISD::DPS_W_PH); 1101 case Intrinsic::mips_dpax_w_ph: 1102 return lowerDSPIntr(Op, DAG, MipsISD::DPAX_W_PH); 1103 case Intrinsic::mips_dpsx_w_ph: 1104 return lowerDSPIntr(Op, DAG, MipsISD::DPSX_W_PH); 1105 case Intrinsic::mips_mulsa_w_ph: 1106 return lowerDSPIntr(Op, DAG, MipsISD::MULSA_W_PH); 1107 case Intrinsic::mips_mult: 1108 return lowerDSPIntr(Op, DAG, MipsISD::Mult); 1109 case Intrinsic::mips_multu: 1110 return lowerDSPIntr(Op, DAG, MipsISD::Multu); 1111 case Intrinsic::mips_madd: 1112 return lowerDSPIntr(Op, DAG, MipsISD::MAdd); 1113 case Intrinsic::mips_maddu: 1114 return lowerDSPIntr(Op, DAG, MipsISD::MAddu); 1115 case Intrinsic::mips_msub: 1116 return lowerDSPIntr(Op, DAG, MipsISD::MSub); 1117 case Intrinsic::mips_msubu: 1118 return lowerDSPIntr(Op, DAG, MipsISD::MSubu); 1119 case Intrinsic::mips_addv_b: 1120 case Intrinsic::mips_addv_h: 1121 case Intrinsic::mips_addv_w: 1122 case Intrinsic::mips_addv_d: 1123 return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1), 1124 Op->getOperand(2)); 1125 case Intrinsic::mips_addvi_b: 1126 case Intrinsic::mips_addvi_h: 1127 case Intrinsic::mips_addvi_w: 1128 case Intrinsic::mips_addvi_d: 1129 return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1), 1130 lowerMSASplatImm(Op, 2, DAG)); 1131 case Intrinsic::mips_and_v: 1132 return DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1), 1133 Op->getOperand(2)); 1134 case Intrinsic::mips_andi_b: 1135 return DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1), 1136 lowerMSASplatImm(Op, 2, DAG)); 1137 case Intrinsic::mips_bnz_b: 1138 case Intrinsic::mips_bnz_h: 1139 case Intrinsic::mips_bnz_w: 1140 case Intrinsic::mips_bnz_d: 1141 return DAG.getNode(MipsISD::VALL_NONZERO, DL, Op->getValueType(0), 1142 Op->getOperand(1)); 1143 case Intrinsic::mips_bnz_v: 1144 return DAG.getNode(MipsISD::VANY_NONZERO, DL, Op->getValueType(0), 1145 Op->getOperand(1)); 1146 case Intrinsic::mips_bsel_v: 1147 return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), 1148 Op->getOperand(1), Op->getOperand(2), 1149 Op->getOperand(3)); 1150 case Intrinsic::mips_bseli_b: 1151 return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), 1152 Op->getOperand(1), Op->getOperand(2), 1153 lowerMSASplatImm(Op, 3, DAG)); 1154 case Intrinsic::mips_bz_b: 1155 case Intrinsic::mips_bz_h: 1156 case Intrinsic::mips_bz_w: 1157 case Intrinsic::mips_bz_d: 1158 return DAG.getNode(MipsISD::VALL_ZERO, DL, Op->getValueType(0), 1159 Op->getOperand(1)); 1160 case Intrinsic::mips_bz_v: 1161 return DAG.getNode(MipsISD::VANY_ZERO, DL, Op->getValueType(0), 1162 Op->getOperand(1)); 1163 case Intrinsic::mips_ceq_b: 1164 case Intrinsic::mips_ceq_h: 1165 case Intrinsic::mips_ceq_w: 1166 case Intrinsic::mips_ceq_d: 1167 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1168 Op->getOperand(2), ISD::SETEQ); 1169 case Intrinsic::mips_ceqi_b: 1170 case Intrinsic::mips_ceqi_h: 1171 case Intrinsic::mips_ceqi_w: 1172 case Intrinsic::mips_ceqi_d: 
1173 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1174 lowerMSASplatImm(Op, 2, DAG), ISD::SETEQ); 1175 case Intrinsic::mips_cle_s_b: 1176 case Intrinsic::mips_cle_s_h: 1177 case Intrinsic::mips_cle_s_w: 1178 case Intrinsic::mips_cle_s_d: 1179 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1180 Op->getOperand(2), ISD::SETLE); 1181 case Intrinsic::mips_clei_s_b: 1182 case Intrinsic::mips_clei_s_h: 1183 case Intrinsic::mips_clei_s_w: 1184 case Intrinsic::mips_clei_s_d: 1185 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1186 lowerMSASplatImm(Op, 2, DAG), ISD::SETLE); 1187 case Intrinsic::mips_cle_u_b: 1188 case Intrinsic::mips_cle_u_h: 1189 case Intrinsic::mips_cle_u_w: 1190 case Intrinsic::mips_cle_u_d: 1191 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1192 Op->getOperand(2), ISD::SETULE); 1193 case Intrinsic::mips_clei_u_b: 1194 case Intrinsic::mips_clei_u_h: 1195 case Intrinsic::mips_clei_u_w: 1196 case Intrinsic::mips_clei_u_d: 1197 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1198 lowerMSASplatImm(Op, 2, DAG), ISD::SETULE); 1199 case Intrinsic::mips_clt_s_b: 1200 case Intrinsic::mips_clt_s_h: 1201 case Intrinsic::mips_clt_s_w: 1202 case Intrinsic::mips_clt_s_d: 1203 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1204 Op->getOperand(2), ISD::SETLT); 1205 case Intrinsic::mips_clti_s_b: 1206 case Intrinsic::mips_clti_s_h: 1207 case Intrinsic::mips_clti_s_w: 1208 case Intrinsic::mips_clti_s_d: 1209 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1210 lowerMSASplatImm(Op, 2, DAG), ISD::SETLT); 1211 case Intrinsic::mips_clt_u_b: 1212 case Intrinsic::mips_clt_u_h: 1213 case Intrinsic::mips_clt_u_w: 1214 case Intrinsic::mips_clt_u_d: 1215 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1216 Op->getOperand(2), ISD::SETULT); 1217 case Intrinsic::mips_clti_u_b: 1218 case Intrinsic::mips_clti_u_h: 1219 case Intrinsic::mips_clti_u_w: 1220 case Intrinsic::mips_clti_u_d: 1221 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1222 lowerMSASplatImm(Op, 2, DAG), ISD::SETULT); 1223 case Intrinsic::mips_copy_s_b: 1224 case Intrinsic::mips_copy_s_h: 1225 case Intrinsic::mips_copy_s_w: 1226 return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT); 1227 case Intrinsic::mips_copy_s_d: 1228 // Don't lower directly into VEXTRACT_SEXT_ELT since i64 might be illegal. 1229 // Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type 1230 // legalizer and EXTRACT_VECTOR_ELT lowering sort it out. 1231 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0), 1232 Op->getOperand(1), Op->getOperand(2)); 1233 case Intrinsic::mips_copy_u_b: 1234 case Intrinsic::mips_copy_u_h: 1235 case Intrinsic::mips_copy_u_w: 1236 return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT); 1237 case Intrinsic::mips_copy_u_d: 1238 // Don't lower directly into VEXTRACT_ZEXT_ELT since i64 might be illegal. 1239 // Instead lower to the generic EXTRACT_VECTOR_ELT node and let the type 1240 // legalizer and EXTRACT_VECTOR_ELT lowering sort it out. 1241 // 1242 // Note: When i64 is illegal, this results in copy_s.w instructions instead 1243 // of copy_u.w instructions. This makes no difference to the behaviour 1244 // since i64 is only illegal when the register file is 32-bit. 
1245 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0), 1246 Op->getOperand(1), Op->getOperand(2)); 1247 case Intrinsic::mips_div_s_b: 1248 case Intrinsic::mips_div_s_h: 1249 case Intrinsic::mips_div_s_w: 1250 case Intrinsic::mips_div_s_d: 1251 return DAG.getNode(ISD::SDIV, DL, Op->getValueType(0), Op->getOperand(1), 1252 Op->getOperand(2)); 1253 case Intrinsic::mips_div_u_b: 1254 case Intrinsic::mips_div_u_h: 1255 case Intrinsic::mips_div_u_w: 1256 case Intrinsic::mips_div_u_d: 1257 return DAG.getNode(ISD::UDIV, DL, Op->getValueType(0), Op->getOperand(1), 1258 Op->getOperand(2)); 1259 case Intrinsic::mips_fadd_w: 1260 case Intrinsic::mips_fadd_d: 1261 return DAG.getNode(ISD::FADD, DL, Op->getValueType(0), Op->getOperand(1), 1262 Op->getOperand(2)); 1263 // Don't lower mips_fcaf_[wd] since LLVM folds SETFALSE condcodes away 1264 case Intrinsic::mips_fceq_w: 1265 case Intrinsic::mips_fceq_d: 1266 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1267 Op->getOperand(2), ISD::SETOEQ); 1268 case Intrinsic::mips_fcle_w: 1269 case Intrinsic::mips_fcle_d: 1270 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1271 Op->getOperand(2), ISD::SETOLE); 1272 case Intrinsic::mips_fclt_w: 1273 case Intrinsic::mips_fclt_d: 1274 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1275 Op->getOperand(2), ISD::SETOLT); 1276 case Intrinsic::mips_fcne_w: 1277 case Intrinsic::mips_fcne_d: 1278 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1279 Op->getOperand(2), ISD::SETONE); 1280 case Intrinsic::mips_fcor_w: 1281 case Intrinsic::mips_fcor_d: 1282 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1283 Op->getOperand(2), ISD::SETO); 1284 case Intrinsic::mips_fcueq_w: 1285 case Intrinsic::mips_fcueq_d: 1286 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1287 Op->getOperand(2), ISD::SETUEQ); 1288 case Intrinsic::mips_fcule_w: 1289 case Intrinsic::mips_fcule_d: 1290 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1291 Op->getOperand(2), ISD::SETULE); 1292 case Intrinsic::mips_fcult_w: 1293 case Intrinsic::mips_fcult_d: 1294 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1295 Op->getOperand(2), ISD::SETULT); 1296 case Intrinsic::mips_fcun_w: 1297 case Intrinsic::mips_fcun_d: 1298 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1299 Op->getOperand(2), ISD::SETUO); 1300 case Intrinsic::mips_fcune_w: 1301 case Intrinsic::mips_fcune_d: 1302 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1303 Op->getOperand(2), ISD::SETUNE); 1304 case Intrinsic::mips_fdiv_w: 1305 case Intrinsic::mips_fdiv_d: 1306 return DAG.getNode(ISD::FDIV, DL, Op->getValueType(0), Op->getOperand(1), 1307 Op->getOperand(2)); 1308 case Intrinsic::mips_ffint_u_w: 1309 case Intrinsic::mips_ffint_u_d: 1310 return DAG.getNode(ISD::UINT_TO_FP, DL, Op->getValueType(0), 1311 Op->getOperand(1)); 1312 case Intrinsic::mips_ffint_s_w: 1313 case Intrinsic::mips_ffint_s_d: 1314 return DAG.getNode(ISD::SINT_TO_FP, DL, Op->getValueType(0), 1315 Op->getOperand(1)); 1316 case Intrinsic::mips_fill_b: 1317 case Intrinsic::mips_fill_h: 1318 case Intrinsic::mips_fill_w: 1319 case Intrinsic::mips_fill_d: { 1320 SmallVector<SDValue, 16> Ops; 1321 EVT ResTy = Op->getValueType(0); 1322 1323 for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i) 1324 Ops.push_back(Op->getOperand(1)); 1325 1326 // If ResTy is v2i64 then the type legalizer will break this node down into 1327 // an equivalent v4i32. 
1328 return DAG.getNode(ISD::BUILD_VECTOR, DL, ResTy, &Ops[0], Ops.size()); 1329 } 1330 case Intrinsic::mips_flog2_w: 1331 case Intrinsic::mips_flog2_d: 1332 return DAG.getNode(ISD::FLOG2, DL, Op->getValueType(0), Op->getOperand(1)); 1333 case Intrinsic::mips_fmadd_w: 1334 case Intrinsic::mips_fmadd_d: 1335 return DAG.getNode(ISD::FMA, SDLoc(Op), Op->getValueType(0), 1336 Op->getOperand(1), Op->getOperand(2), Op->getOperand(3)); 1337 case Intrinsic::mips_fmul_w: 1338 case Intrinsic::mips_fmul_d: 1339 return DAG.getNode(ISD::FMUL, DL, Op->getValueType(0), Op->getOperand(1), 1340 Op->getOperand(2)); 1341 case Intrinsic::mips_fmsub_w: 1342 case Intrinsic::mips_fmsub_d: { 1343 EVT ResTy = Op->getValueType(0); 1344 return DAG.getNode(ISD::FSUB, SDLoc(Op), ResTy, Op->getOperand(1), 1345 DAG.getNode(ISD::FMUL, SDLoc(Op), ResTy, 1346 Op->getOperand(2), Op->getOperand(3))); 1347 } 1348 case Intrinsic::mips_frint_w: 1349 case Intrinsic::mips_frint_d: 1350 return DAG.getNode(ISD::FRINT, DL, Op->getValueType(0), Op->getOperand(1)); 1351 case Intrinsic::mips_fsqrt_w: 1352 case Intrinsic::mips_fsqrt_d: 1353 return DAG.getNode(ISD::FSQRT, DL, Op->getValueType(0), Op->getOperand(1)); 1354 case Intrinsic::mips_fsub_w: 1355 case Intrinsic::mips_fsub_d: 1356 return DAG.getNode(ISD::FSUB, DL, Op->getValueType(0), Op->getOperand(1), 1357 Op->getOperand(2)); 1358 case Intrinsic::mips_ftrunc_u_w: 1359 case Intrinsic::mips_ftrunc_u_d: 1360 return DAG.getNode(ISD::FP_TO_UINT, DL, Op->getValueType(0), 1361 Op->getOperand(1)); 1362 case Intrinsic::mips_ftrunc_s_w: 1363 case Intrinsic::mips_ftrunc_s_d: 1364 return DAG.getNode(ISD::FP_TO_SINT, DL, Op->getValueType(0), 1365 Op->getOperand(1)); 1366 case Intrinsic::mips_ilvev_b: 1367 case Intrinsic::mips_ilvev_h: 1368 case Intrinsic::mips_ilvev_w: 1369 case Intrinsic::mips_ilvev_d: 1370 return DAG.getNode(MipsISD::ILVEV, DL, Op->getValueType(0), 1371 Op->getOperand(1), Op->getOperand(2)); 1372 case Intrinsic::mips_ilvl_b: 1373 case Intrinsic::mips_ilvl_h: 1374 case Intrinsic::mips_ilvl_w: 1375 case Intrinsic::mips_ilvl_d: 1376 return DAG.getNode(MipsISD::ILVL, DL, Op->getValueType(0), 1377 Op->getOperand(1), Op->getOperand(2)); 1378 case Intrinsic::mips_ilvod_b: 1379 case Intrinsic::mips_ilvod_h: 1380 case Intrinsic::mips_ilvod_w: 1381 case Intrinsic::mips_ilvod_d: 1382 return DAG.getNode(MipsISD::ILVOD, DL, Op->getValueType(0), 1383 Op->getOperand(1), Op->getOperand(2)); 1384 case Intrinsic::mips_ilvr_b: 1385 case Intrinsic::mips_ilvr_h: 1386 case Intrinsic::mips_ilvr_w: 1387 case Intrinsic::mips_ilvr_d: 1388 return DAG.getNode(MipsISD::ILVR, DL, Op->getValueType(0), 1389 Op->getOperand(1), Op->getOperand(2)); 1390 case Intrinsic::mips_insert_b: 1391 case Intrinsic::mips_insert_h: 1392 case Intrinsic::mips_insert_w: 1393 case Intrinsic::mips_insert_d: 1394 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0), 1395 Op->getOperand(1), Op->getOperand(3), Op->getOperand(2)); 1396 case Intrinsic::mips_ldi_b: 1397 case Intrinsic::mips_ldi_h: 1398 case Intrinsic::mips_ldi_w: 1399 case Intrinsic::mips_ldi_d: 1400 return lowerMSASplatImm(Op, 1, DAG); 1401 case Intrinsic::mips_maddv_b: 1402 case Intrinsic::mips_maddv_h: 1403 case Intrinsic::mips_maddv_w: 1404 case Intrinsic::mips_maddv_d: { 1405 EVT ResTy = Op->getValueType(0); 1406 return DAG.getNode(ISD::ADD, SDLoc(Op), ResTy, Op->getOperand(1), 1407 DAG.getNode(ISD::MUL, SDLoc(Op), ResTy, 1408 Op->getOperand(2), Op->getOperand(3))); 1409 } 1410 case Intrinsic::mips_max_s_b: 1411 case 
Intrinsic::mips_max_s_h: 1412 case Intrinsic::mips_max_s_w: 1413 case Intrinsic::mips_max_s_d: 1414 return DAG.getNode(MipsISD::VSMAX, DL, Op->getValueType(0), 1415 Op->getOperand(1), Op->getOperand(2)); 1416 case Intrinsic::mips_max_u_b: 1417 case Intrinsic::mips_max_u_h: 1418 case Intrinsic::mips_max_u_w: 1419 case Intrinsic::mips_max_u_d: 1420 return DAG.getNode(MipsISD::VUMAX, DL, Op->getValueType(0), 1421 Op->getOperand(1), Op->getOperand(2)); 1422 case Intrinsic::mips_maxi_s_b: 1423 case Intrinsic::mips_maxi_s_h: 1424 case Intrinsic::mips_maxi_s_w: 1425 case Intrinsic::mips_maxi_s_d: 1426 return DAG.getNode(MipsISD::VSMAX, DL, Op->getValueType(0), 1427 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 1428 case Intrinsic::mips_maxi_u_b: 1429 case Intrinsic::mips_maxi_u_h: 1430 case Intrinsic::mips_maxi_u_w: 1431 case Intrinsic::mips_maxi_u_d: 1432 return DAG.getNode(MipsISD::VUMAX, DL, Op->getValueType(0), 1433 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 1434 case Intrinsic::mips_min_s_b: 1435 case Intrinsic::mips_min_s_h: 1436 case Intrinsic::mips_min_s_w: 1437 case Intrinsic::mips_min_s_d: 1438 return DAG.getNode(MipsISD::VSMIN, DL, Op->getValueType(0), 1439 Op->getOperand(1), Op->getOperand(2)); 1440 case Intrinsic::mips_min_u_b: 1441 case Intrinsic::mips_min_u_h: 1442 case Intrinsic::mips_min_u_w: 1443 case Intrinsic::mips_min_u_d: 1444 return DAG.getNode(MipsISD::VUMIN, DL, Op->getValueType(0), 1445 Op->getOperand(1), Op->getOperand(2)); 1446 case Intrinsic::mips_mini_s_b: 1447 case Intrinsic::mips_mini_s_h: 1448 case Intrinsic::mips_mini_s_w: 1449 case Intrinsic::mips_mini_s_d: 1450 return DAG.getNode(MipsISD::VSMIN, DL, Op->getValueType(0), 1451 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 1452 case Intrinsic::mips_mini_u_b: 1453 case Intrinsic::mips_mini_u_h: 1454 case Intrinsic::mips_mini_u_w: 1455 case Intrinsic::mips_mini_u_d: 1456 return DAG.getNode(MipsISD::VUMIN, DL, Op->getValueType(0), 1457 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 1458 case Intrinsic::mips_mod_s_b: 1459 case Intrinsic::mips_mod_s_h: 1460 case Intrinsic::mips_mod_s_w: 1461 case Intrinsic::mips_mod_s_d: 1462 return DAG.getNode(ISD::SREM, DL, Op->getValueType(0), Op->getOperand(1), 1463 Op->getOperand(2)); 1464 case Intrinsic::mips_mod_u_b: 1465 case Intrinsic::mips_mod_u_h: 1466 case Intrinsic::mips_mod_u_w: 1467 case Intrinsic::mips_mod_u_d: 1468 return DAG.getNode(ISD::UREM, DL, Op->getValueType(0), Op->getOperand(1), 1469 Op->getOperand(2)); 1470 case Intrinsic::mips_mulv_b: 1471 case Intrinsic::mips_mulv_h: 1472 case Intrinsic::mips_mulv_w: 1473 case Intrinsic::mips_mulv_d: 1474 return DAG.getNode(ISD::MUL, DL, Op->getValueType(0), Op->getOperand(1), 1475 Op->getOperand(2)); 1476 case Intrinsic::mips_msubv_b: 1477 case Intrinsic::mips_msubv_h: 1478 case Intrinsic::mips_msubv_w: 1479 case Intrinsic::mips_msubv_d: { 1480 EVT ResTy = Op->getValueType(0); 1481 return DAG.getNode(ISD::SUB, SDLoc(Op), ResTy, Op->getOperand(1), 1482 DAG.getNode(ISD::MUL, SDLoc(Op), ResTy, 1483 Op->getOperand(2), Op->getOperand(3))); 1484 } 1485 case Intrinsic::mips_nlzc_b: 1486 case Intrinsic::mips_nlzc_h: 1487 case Intrinsic::mips_nlzc_w: 1488 case Intrinsic::mips_nlzc_d: 1489 return DAG.getNode(ISD::CTLZ, DL, Op->getValueType(0), Op->getOperand(1)); 1490 case Intrinsic::mips_nor_v: { 1491 SDValue Res = DAG.getNode(ISD::OR, DL, Op->getValueType(0), 1492 Op->getOperand(1), Op->getOperand(2)); 1493 return DAG.getNOT(DL, Res, Res->getValueType(0)); 1494 } 1495 case Intrinsic::mips_nori_b: { 1496 SDValue 
Res = DAG.getNode(ISD::OR, DL, Op->getValueType(0), 1497 Op->getOperand(1), 1498 lowerMSASplatImm(Op, 2, DAG)); 1499 return DAG.getNOT(DL, Res, Res->getValueType(0)); 1500 } 1501 case Intrinsic::mips_or_v: 1502 return DAG.getNode(ISD::OR, DL, Op->getValueType(0), Op->getOperand(1), 1503 Op->getOperand(2)); 1504 case Intrinsic::mips_ori_b: 1505 return DAG.getNode(ISD::OR, DL, Op->getValueType(0), 1506 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 1507 case Intrinsic::mips_pckev_b: 1508 case Intrinsic::mips_pckev_h: 1509 case Intrinsic::mips_pckev_w: 1510 case Intrinsic::mips_pckev_d: 1511 return DAG.getNode(MipsISD::PCKEV, DL, Op->getValueType(0), 1512 Op->getOperand(1), Op->getOperand(2)); 1513 case Intrinsic::mips_pckod_b: 1514 case Intrinsic::mips_pckod_h: 1515 case Intrinsic::mips_pckod_w: 1516 case Intrinsic::mips_pckod_d: 1517 return DAG.getNode(MipsISD::PCKOD, DL, Op->getValueType(0), 1518 Op->getOperand(1), Op->getOperand(2)); 1519 case Intrinsic::mips_pcnt_b: 1520 case Intrinsic::mips_pcnt_h: 1521 case Intrinsic::mips_pcnt_w: 1522 case Intrinsic::mips_pcnt_d: 1523 return DAG.getNode(ISD::CTPOP, DL, Op->getValueType(0), Op->getOperand(1)); 1524 case Intrinsic::mips_shf_b: 1525 case Intrinsic::mips_shf_h: 1526 case Intrinsic::mips_shf_w: 1527 return DAG.getNode(MipsISD::SHF, DL, Op->getValueType(0), 1528 Op->getOperand(2), Op->getOperand(1)); 1529 case Intrinsic::mips_sll_b: 1530 case Intrinsic::mips_sll_h: 1531 case Intrinsic::mips_sll_w: 1532 case Intrinsic::mips_sll_d: 1533 return DAG.getNode(ISD::SHL, DL, Op->getValueType(0), Op->getOperand(1), 1534 Op->getOperand(2)); 1535 case Intrinsic::mips_slli_b: 1536 case Intrinsic::mips_slli_h: 1537 case Intrinsic::mips_slli_w: 1538 case Intrinsic::mips_slli_d: 1539 return DAG.getNode(ISD::SHL, DL, Op->getValueType(0), 1540 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 1541 case Intrinsic::mips_splati_b: 1542 case Intrinsic::mips_splati_h: 1543 case Intrinsic::mips_splati_w: 1544 case Intrinsic::mips_splati_d: 1545 return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0), 1546 lowerMSASplatImm(Op, 2, DAG), Op->getOperand(1), 1547 Op->getOperand(1)); 1548 case Intrinsic::mips_sra_b: 1549 case Intrinsic::mips_sra_h: 1550 case Intrinsic::mips_sra_w: 1551 case Intrinsic::mips_sra_d: 1552 return DAG.getNode(ISD::SRA, DL, Op->getValueType(0), Op->getOperand(1), 1553 Op->getOperand(2)); 1554 case Intrinsic::mips_srai_b: 1555 case Intrinsic::mips_srai_h: 1556 case Intrinsic::mips_srai_w: 1557 case Intrinsic::mips_srai_d: 1558 return DAG.getNode(ISD::SRA, DL, Op->getValueType(0), 1559 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 1560 case Intrinsic::mips_srl_b: 1561 case Intrinsic::mips_srl_h: 1562 case Intrinsic::mips_srl_w: 1563 case Intrinsic::mips_srl_d: 1564 return DAG.getNode(ISD::SRL, DL, Op->getValueType(0), Op->getOperand(1), 1565 Op->getOperand(2)); 1566 case Intrinsic::mips_srli_b: 1567 case Intrinsic::mips_srli_h: 1568 case Intrinsic::mips_srli_w: 1569 case Intrinsic::mips_srli_d: 1570 return DAG.getNode(ISD::SRL, DL, Op->getValueType(0), 1571 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 1572 case Intrinsic::mips_subv_b: 1573 case Intrinsic::mips_subv_h: 1574 case Intrinsic::mips_subv_w: 1575 case Intrinsic::mips_subv_d: 1576 return DAG.getNode(ISD::SUB, DL, Op->getValueType(0), Op->getOperand(1), 1577 Op->getOperand(2)); 1578 case Intrinsic::mips_subvi_b: 1579 case Intrinsic::mips_subvi_h: 1580 case Intrinsic::mips_subvi_w: 1581 case Intrinsic::mips_subvi_d: 1582 return DAG.getNode(ISD::SUB, DL, Op->getValueType(0), 

static SDValue lowerMSALoadIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
  SDLoc DL(Op);
  SDValue ChainIn = Op->getOperand(0);
  SDValue Address = Op->getOperand(2);
  SDValue Offset = Op->getOperand(3);
  EVT ResTy = Op->getValueType(0);
  EVT PtrTy = Address->getValueType(0);

  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);

  return DAG.getLoad(ResTy, DL, ChainIn, Address, MachinePointerInfo(), false,
                     false, false, 16);
}

SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
  switch (Intr) {
  default:
    return SDValue();
  case Intrinsic::mips_extp:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTP);
  case Intrinsic::mips_extpdp:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTPDP);
  case Intrinsic::mips_extr_w:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_W);
  case Intrinsic::mips_extr_r_w:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_R_W);
  case Intrinsic::mips_extr_rs_w:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_RS_W);
  case Intrinsic::mips_extr_s_h:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_S_H);
  case Intrinsic::mips_mthlip:
    return lowerDSPIntr(Op, DAG, MipsISD::MTHLIP);
  case Intrinsic::mips_mulsaq_s_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::MULSAQ_S_W_PH);
  case Intrinsic::mips_maq_s_w_phl:
    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHL);
  case Intrinsic::mips_maq_s_w_phr:
    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHR);
  case Intrinsic::mips_maq_sa_w_phl:
    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHL);
  case Intrinsic::mips_maq_sa_w_phr:
    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHR);
  case Intrinsic::mips_dpaq_s_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_S_W_PH);
  case Intrinsic::mips_dpsq_s_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_S_W_PH);
  case Intrinsic::mips_dpaq_sa_l_w:
    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_SA_L_W);
  case Intrinsic::mips_dpsq_sa_l_w:
    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_SA_L_W);
  case Intrinsic::mips_dpaqx_s_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_S_W_PH);
  case Intrinsic::mips_dpaqx_sa_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_SA_W_PH);
  case Intrinsic::mips_dpsqx_s_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH);
  case Intrinsic::mips_dpsqx_sa_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH);
  case Intrinsic::mips_ld_b:
  case Intrinsic::mips_ld_h:
  case Intrinsic::mips_ld_w:
  case Intrinsic::mips_ld_d:
  case Intrinsic::mips_ldx_b:
  case Intrinsic::mips_ldx_h:
  case Intrinsic::mips_ldx_w:
  case Intrinsic::mips_ldx_d:
    return lowerMSALoadIntr(Op, DAG, Intr);
  }
}
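
// As a sketch of the load path: an @llvm.mips.ld.w(%ptr, %offset) call reaches
// the switch above as Intrinsic::mips_ld_w, and lowerMSALoadIntr turns it into
// a plain ISD::LOAD of the result type from (%ptr + %offset). The load node is
// built with an empty MachinePointerInfo and 16-byte alignment, i.e. the
// natural alignment of an MSA register, independent of what is otherwise known
// about the pointer.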

static SDValue lowerMSAStoreIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr) {
  SDLoc DL(Op);
  SDValue ChainIn = Op->getOperand(0);
  SDValue Value = Op->getOperand(2);
  SDValue Address = Op->getOperand(3);
  SDValue Offset = Op->getOperand(4);
  EVT PtrTy = Address->getValueType(0);

  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);

  return DAG.getStore(ChainIn, DL, Value, Address, MachinePointerInfo(), false,
                      false, 16);
}

SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op,
                                                  SelectionDAG &DAG) const {
  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
  switch (Intr) {
  default:
    return SDValue();
  case Intrinsic::mips_st_b:
  case Intrinsic::mips_st_h:
  case Intrinsic::mips_st_w:
  case Intrinsic::mips_st_d:
  case Intrinsic::mips_stx_b:
  case Intrinsic::mips_stx_h:
  case Intrinsic::mips_stx_w:
  case Intrinsic::mips_stx_d:
    return lowerMSAStoreIntr(Op, DAG, Intr);
  }
}

/// \brief Check if the given BuildVectorSDNode is a splat.
/// This method currently relies on DAG nodes being reused when equivalent,
/// so it's possible for this to return false even when isConstantSplat returns
/// true.
static bool isSplatVector(const BuildVectorSDNode *N) {
  unsigned int nOps = N->getNumOperands();
  assert(nOps > 1 && "isSplat has 0 or 1 sized build vector");

  SDValue Operand0 = N->getOperand(0);

  for (unsigned int i = 1; i < nOps; ++i) {
    if (N->getOperand(i) != Operand0)
      return false;
  }

  return true;
}

// Lower ISD::EXTRACT_VECTOR_ELT into MipsISD::VEXTRACT_SEXT_ELT.
//
// The non-value bits resulting from ISD::EXTRACT_VECTOR_ELT are undefined. We
// choose to sign-extend but we could have equally chosen zero-extend. The
// DAGCombiner will fold any sign/zero extension of the ISD::EXTRACT_VECTOR_ELT
// result into this node later (possibly changing it to a zero-extend in the
// process).
SDValue MipsSETargetLowering::
lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT ResTy = Op->getValueType(0);
  SDValue Op0 = Op->getOperand(0);
  EVT VecTy = Op0->getValueType(0);

  if (!VecTy.is128BitVector())
    return SDValue();

  if (ResTy.isInteger()) {
    SDValue Op1 = Op->getOperand(1);
    EVT EltTy = VecTy.getVectorElementType();
    return DAG.getNode(MipsISD::VEXTRACT_SEXT_ELT, DL, ResTy, Op0, Op1,
                       DAG.getValueType(EltTy));
  }

  return Op;
}

static bool isConstantOrUndef(const SDValue Op) {
  if (Op->getOpcode() == ISD::UNDEF)
    return true;
  if (dyn_cast<ConstantSDNode>(Op))
    return true;
  if (dyn_cast<ConstantFPSDNode>(Op))
    return true;
  return false;
}

static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op) {
  for (unsigned i = 0; i < Op->getNumOperands(); ++i)
    if (isConstantOrUndef(Op->getOperand(i)))
      return true;
  return false;
}
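
// As an illustration of the two helpers above: for a node such as
//   (build_vector %x, %x, %x, %x)
// isSplatVector returns true because every operand is the same SDValue, while
// isConstantOrUndefBUILD_VECTOR returns true as soon as any operand is a
// ConstantSDNode, ConstantFPSDNode or UNDEF. Both are used by the
// BUILD_VECTOR lowering below.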

// Lowers ISD::BUILD_VECTOR into appropriate SelectionDAG nodes for the
// backend.
//
// Lowers according to the following rules:
// - Constant splats are legal as-is as long as the SplatBitSize is a power of
//   2 less than or equal to 64 and the value fits into a signed 10-bit
//   immediate
// - Constant splats are lowered to bitconverted BUILD_VECTORs if SplatBitSize
//   is a power of 2 less than or equal to 64 and the value does not fit into a
//   signed 10-bit immediate
// - Non-constant splats are legal as-is.
// - Non-constant non-splats are lowered to sequences of INSERT_VECTOR_ELT.
// - All others are illegal and must be expanded.
SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op,
                                                SelectionDAG &DAG) const {
  BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op);
  EVT ResTy = Op->getValueType(0);
  SDLoc DL(Op);
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;

  if (!Subtarget->hasMSA() || !ResTy.is128BitVector())
    return SDValue();

  if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                            HasAnyUndefs, 8,
                            !Subtarget->isLittle()) && SplatBitSize <= 64) {
    // We can only cope with 8, 16, 32, or 64-bit elements
    if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 &&
        SplatBitSize != 64)
      return SDValue();

    // If the value fits into a simm10 then we can use ldi.[bhwd]
    if (SplatValue.isSignedIntN(10))
      return Op;

    EVT ViaVecTy;

    switch (SplatBitSize) {
    default:
      return SDValue();
    case 8:
      ViaVecTy = MVT::v16i8;
      break;
    case 16:
      ViaVecTy = MVT::v8i16;
      break;
    case 32:
      ViaVecTy = MVT::v4i32;
      break;
    case 64:
      // There's no fill.d to fall back on for 64-bit values
      return SDValue();
    }

    SmallVector<SDValue, 16> Ops;
    SDValue Constant = DAG.getConstant(SplatValue.sextOrSelf(32), MVT::i32);

    for (unsigned i = 0; i < ViaVecTy.getVectorNumElements(); ++i)
      Ops.push_back(Constant);

    SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Node), ViaVecTy,
                                 &Ops[0], Ops.size());

    if (ViaVecTy != ResTy)
      Result = DAG.getNode(ISD::BITCAST, SDLoc(Node), ResTy, Result);

    return Result;
  } else if (isSplatVector(Node))
    return Op;
  else if (!isConstantOrUndefBUILD_VECTOR(Node)) {
    // Use INSERT_VECTOR_ELT operations rather than expand to stores.
    // The resulting code is the same length as the expansion, but it doesn't
    // use memory operations
    EVT ResTy = Node->getValueType(0);

    assert(ResTy.isVector());

    unsigned NumElts = ResTy.getVectorNumElements();
    SDValue Vector = DAG.getUNDEF(ResTy);
    for (unsigned i = 0; i < NumElts; ++i) {
      Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
                           Node->getOperand(i),
                           DAG.getConstant(i, MVT::i32));
    }
    return Vector;
  }

  return SDValue();
}
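
// Worked examples of the rules above (the instruction names are the expected
// selections, not something this function creates directly):
//   - a v16i8 splat of 7 fits in a signed 10-bit immediate, so the node is
//     returned unchanged and is expected to select to ldi.b $wd, 7;
//   - a constant splat whose value does not fit in a signed 10-bit immediate
//     is rebuilt as a BUILD_VECTOR of i32 constants in the integer type that
//     matches its splat size (v16i8/v8i16/v4i32) and bitcast back to the
//     result type when the two differ;
//   - (build_vector %a, %b, %c, %d) with non-constant operands becomes a
//     chain of four INSERT_VECTOR_ELT nodes starting from undef.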

// Lower VECTOR_SHUFFLE into SHF (if possible).
//
// SHF splits the vector into blocks of four elements, then shuffles these
// elements according to a <4 x i2> constant (encoded as an integer immediate).
//
// It is therefore possible to lower into SHF when the mask takes the form:
//   <a, b, c, d, a+4, b+4, c+4, d+4, a+8, b+8, c+8, d+8, ...>
// When undefs appear they are treated as if they were whatever value is
// necessary in order to fit the above form.
//
// For example:
//   %2 = shufflevector <8 x i16> %0, <8 x i16> undef,
//                      <8 x i32> <i32 3, i32 2, i32 1, i32 0,
//                                 i32 7, i32 6, i32 5, i32 4>
// is lowered to:
//   (SHF_H $w0, $w1, 27)
// where the 27 comes from:
//   3 + (2 << 2) + (1 << 4) + (0 << 6)
static SDValue lowerVECTOR_SHUFFLE_SHF(SDValue Op, EVT ResTy,
                                       SmallVector<int, 16> Indices,
                                       SelectionDAG &DAG) {
  int SHFIndices[4] = { -1, -1, -1, -1 };

  if (Indices.size() < 4)
    return SDValue();

  for (unsigned i = 0; i < 4; ++i) {
    for (unsigned j = i; j < Indices.size(); j += 4) {
      int Idx = Indices[j];

      // Convert from vector index to 4-element subvector index
      // If an index refers to an element outside of the subvector then give up
      if (Idx != -1) {
        Idx -= 4 * (j / 4);
        if (Idx < 0 || Idx >= 4)
          return SDValue();
      }

      // If the mask has an undef, replace it with the current index.
      // Note that it might still be undef if the current index is also undef
      if (SHFIndices[i] == -1)
        SHFIndices[i] = Idx;

      // Check that non-undef values are the same as in the mask. If they
      // aren't then give up
      if (!(Idx == -1 || Idx == SHFIndices[i]))
        return SDValue();
    }
  }

  // Calculate the immediate. Replace any remaining undefs with zero
  APInt Imm(32, 0);
  for (int i = 3; i >= 0; --i) {
    int Idx = SHFIndices[i];

    if (Idx == -1)
      Idx = 0;

    Imm <<= 2;
    Imm |= Idx & 0x3;
  }

  return DAG.getNode(MipsISD::SHF, SDLoc(Op), ResTy,
                     DAG.getConstant(Imm, MVT::i32), Op->getOperand(0));
}

// Lower VECTOR_SHUFFLE into ILVEV (if possible).
//
// ILVEV interleaves the even elements from each vector.
//
// It is possible to lower into ILVEV when the mask takes the form:
//   <0, n, 2, n+2, 4, n+4, ...>
// where n is the number of elements in the vector.
//
// When undefs appear in the mask they are treated as if they were whatever
// value is necessary in order to fit the above form.
static SDValue lowerVECTOR_SHUFFLE_ILVEV(SDValue Op, EVT ResTy,
                                         SmallVector<int, 16> Indices,
                                         SelectionDAG &DAG) {
  assert((Indices.size() % 2) == 0);
  int WsIdx = 0;
  int WtIdx = ResTy.getVectorNumElements();

  for (unsigned i = 0; i < Indices.size(); i += 2) {
    if (Indices[i] != -1 && Indices[i] != WsIdx)
      return SDValue();
    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
      return SDValue();
    WsIdx += 2;
    WtIdx += 2;
  }

  return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), ResTy, Op->getOperand(0),
                     Op->getOperand(1));
}
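
// For a concrete v8i16 example, the ILVEV form above corresponds to the mask
//   <0, 8, 2, 10, 4, 12, 6, 14>
// i.e. the even elements of the first operand interleaved with the even
// elements of the second, which is expected to select to ilvev.h.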

// Lower VECTOR_SHUFFLE into ILVOD (if possible).
//
// ILVOD interleaves the odd elements from each vector.
//
// It is possible to lower into ILVOD when the mask takes the form:
//   <1, n+1, 3, n+3, 5, n+5, ...>
// where n is the number of elements in the vector.
//
// When undefs appear in the mask they are treated as if they were whatever
// value is necessary in order to fit the above form.
static SDValue lowerVECTOR_SHUFFLE_ILVOD(SDValue Op, EVT ResTy,
                                         SmallVector<int, 16> Indices,
                                         SelectionDAG &DAG) {
  assert((Indices.size() % 2) == 0);
  int WsIdx = 1;
  int WtIdx = ResTy.getVectorNumElements() + 1;

  for (unsigned i = 0; i < Indices.size(); i += 2) {
    if (Indices[i] != -1 && Indices[i] != WsIdx)
      return SDValue();
    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
      return SDValue();
    WsIdx += 2;
    WtIdx += 2;
  }

  return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), ResTy, Op->getOperand(0),
                     Op->getOperand(1));
}

// Lower VECTOR_SHUFFLE into ILVL (if possible).
//
// ILVL interleaves consecutive elements from the left half of each vector.
//
// It is possible to lower into ILVL when the mask takes the form:
//   <0, n, 1, n+1, 2, n+2, ...>
// where n is the number of elements in the vector.
//
// When undefs appear in the mask they are treated as if they were whatever
// value is necessary in order to fit the above form.
static SDValue lowerVECTOR_SHUFFLE_ILVL(SDValue Op, EVT ResTy,
                                        SmallVector<int, 16> Indices,
                                        SelectionDAG &DAG) {
  assert((Indices.size() % 2) == 0);
  int WsIdx = 0;
  int WtIdx = ResTy.getVectorNumElements();

  for (unsigned i = 0; i < Indices.size(); i += 2) {
    if (Indices[i] != -1 && Indices[i] != WsIdx)
      return SDValue();
    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
      return SDValue();
    WsIdx++;
    WtIdx++;
  }

  return DAG.getNode(MipsISD::ILVL, SDLoc(Op), ResTy, Op->getOperand(0),
                     Op->getOperand(1));
}
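
// Concrete v8i16 masks for the two forms above:
//   ILVOD: <1, 9, 3, 11, 5, 13, 7, 15>
//   ILVL:  <0, 8, 1, 9, 2, 10, 3, 11>
// Undef (-1) entries are accepted anywhere they do not contradict the pattern.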

// Lower VECTOR_SHUFFLE into ILVR (if possible).
//
// ILVR interleaves consecutive elements from the right half of each vector.
//
// It is possible to lower into ILVR when the mask takes the form:
//   <x, n+x, x+1, n+x+1, x+2, n+x+2, ...>
// where n is the number of elements in the vector and x is half n.
//
// When undefs appear in the mask they are treated as if they were whatever
// value is necessary in order to fit the above form.
static SDValue lowerVECTOR_SHUFFLE_ILVR(SDValue Op, EVT ResTy,
                                        SmallVector<int, 16> Indices,
                                        SelectionDAG &DAG) {
  assert((Indices.size() % 2) == 0);
  unsigned NumElts = ResTy.getVectorNumElements();
  int WsIdx = NumElts / 2;
  int WtIdx = NumElts + NumElts / 2;

  for (unsigned i = 0; i < Indices.size(); i += 2) {
    if (Indices[i] != -1 && Indices[i] != WsIdx)
      return SDValue();
    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
      return SDValue();
    WsIdx++;
    WtIdx++;
  }

  return DAG.getNode(MipsISD::ILVR, SDLoc(Op), ResTy, Op->getOperand(0),
                     Op->getOperand(1));
}

// Lower VECTOR_SHUFFLE into PCKEV (if possible).
//
// PCKEV copies the even elements of each vector into the result vector.
//
// It is possible to lower into PCKEV when the mask takes the form:
//   <0, 2, 4, ..., n, n+2, n+4, ...>
// where n is the number of elements in the vector.
//
// When undefs appear in the mask they are treated as if they were whatever
// value is necessary in order to fit the above form.
static SDValue lowerVECTOR_SHUFFLE_PCKEV(SDValue Op, EVT ResTy,
                                         SmallVector<int, 16> Indices,
                                         SelectionDAG &DAG) {
  assert((Indices.size() % 2) == 0);
  int Idx = 0;

  for (unsigned i = 0; i < Indices.size(); ++i) {
    if (Indices[i] != -1 && Indices[i] != Idx)
      return SDValue();
    Idx += 2;
  }

  return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), ResTy, Op->getOperand(0),
                     Op->getOperand(1));
}

// Lower VECTOR_SHUFFLE into PCKOD (if possible).
//
// PCKOD copies the odd elements of each vector into the result vector.
//
// It is possible to lower into PCKOD when the mask takes the form:
//   <1, 3, 5, ..., n+1, n+3, n+5, ...>
// where n is the number of elements in the vector.
//
// When undefs appear in the mask they are treated as if they were whatever
// value is necessary in order to fit the above form.
static SDValue lowerVECTOR_SHUFFLE_PCKOD(SDValue Op, EVT ResTy,
                                         SmallVector<int, 16> Indices,
                                         SelectionDAG &DAG) {
  assert((Indices.size() % 2) == 0);
  int Idx = 1;

  for (unsigned i = 0; i < Indices.size(); ++i) {
    if (Indices[i] != -1 && Indices[i] != Idx)
      return SDValue();
    Idx += 2;
  }

  return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), ResTy, Op->getOperand(0),
                     Op->getOperand(1));
}
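
// Concrete v8i16 masks for the three forms above:
//   ILVR:  <4, 12, 5, 13, 6, 14, 7, 15>
//   PCKEV: <0, 2, 4, 6, 8, 10, 12, 14>
//   PCKOD: <1, 3, 5, 7, 9, 11, 13, 15>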

// Lower VECTOR_SHUFFLE into VSHF.
//
// This mostly consists of converting the shuffle indices in Indices into a
// BUILD_VECTOR and adding it as an operand to the resulting VSHF. There is
// also code to eliminate unused operands of the VECTOR_SHUFFLE. For example,
// if the type is v8i16 and all the indices are less than 8 then the second
// operand is unused and can be replaced with anything. We choose to replace it
// with the used operand since this reduces the number of instructions overall.
static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
                                        SmallVector<int, 16> Indices,
                                        SelectionDAG &DAG) {
  SmallVector<SDValue, 16> Ops;
  SDValue Op0;
  SDValue Op1;
  EVT MaskVecTy = ResTy.changeVectorElementTypeToInteger();
  EVT MaskEltTy = MaskVecTy.getVectorElementType();
  bool Using1stVec = false;
  bool Using2ndVec = false;
  SDLoc DL(Op);
  int ResTyNumElts = ResTy.getVectorNumElements();

  for (int i = 0; i < ResTyNumElts; ++i) {
    // Idx == -1 means UNDEF
    int Idx = Indices[i];

    if (0 <= Idx && Idx < ResTyNumElts)
      Using1stVec = true;
    if (ResTyNumElts <= Idx && Idx < ResTyNumElts * 2)
      Using2ndVec = true;
  }

  for (SmallVector<int, 16>::iterator I = Indices.begin(); I != Indices.end();
       ++I)
    Ops.push_back(DAG.getTargetConstant(*I, MaskEltTy));

  SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, &Ops[0],
                                Ops.size());

  if (Using1stVec && Using2ndVec) {
    Op0 = Op->getOperand(0);
    Op1 = Op->getOperand(1);
  } else if (Using1stVec)
    Op0 = Op1 = Op->getOperand(0);
  else if (Using2ndVec)
    Op0 = Op1 = Op->getOperand(1);
  else
    llvm_unreachable("shuffle vector mask references neither vector operand?");

  return DAG.getNode(MipsISD::VSHF, DL, ResTy, MaskVec, Op0, Op1);
}

// Lower VECTOR_SHUFFLE into one of a number of instructions depending on the
// indices in the shuffle.
SDValue MipsSETargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  ShuffleVectorSDNode *Node = cast<ShuffleVectorSDNode>(Op);
  EVT ResTy = Op->getValueType(0);

  if (!ResTy.is128BitVector())
    return SDValue();

  int ResTyNumElts = ResTy.getVectorNumElements();
  SmallVector<int, 16> Indices;

  for (int i = 0; i < ResTyNumElts; ++i)
    Indices.push_back(Node->getMaskElt(i));

  SDValue Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG);
  if (Result.getNode())
    return Result;
  Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG);
  if (Result.getNode())
    return Result;
  Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG);
  if (Result.getNode())
    return Result;
  Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG);
  if (Result.getNode())
    return Result;
  Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG);
  if (Result.getNode())
    return Result;
  Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG);
  if (Result.getNode())
    return Result;
  Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG);
  if (Result.getNode())
    return Result;
  return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
}
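
// A mask that matches none of the single-instruction patterns above, for
// example the v8i16 mask
//   <0, 3, 1, 2, 7, 6, 5, 4>
// falls through to lowerVECTOR_SHUFFLE_VSHF and becomes a VSHF node whose
// first operand is the mask materialised as a BUILD_VECTOR, at the cost of
// the extra instructions needed to build that mask vector.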

MachineBasicBlock * MipsSETargetLowering::
emitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const {
  // $bb:
  //  bposge32_pseudo $vr0
  //  =>
  // $bb:
  //  bposge32 $tbb
  // $fbb:
  //  li $vr2, 0
  //  b $sink
  // $tbb:
  //  li $vr1, 1
  // $sink:
  //  $vr0 = phi($vr2, $fbb, $vr1, $tbb)

  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  DebugLoc DL = MI->getDebugLoc();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *Sink = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, FBB);
  F->insert(It, TBB);
  F->insert(It, Sink);

  // Transfer the remainder of BB and its successor edges to Sink.
  Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
               BB->end());
  Sink->transferSuccessorsAndUpdatePHIs(BB);

  // Add successors.
  BB->addSuccessor(FBB);
  BB->addSuccessor(TBB);
  FBB->addSuccessor(Sink);
  TBB->addSuccessor(Sink);

  // Insert the real bposge32 instruction to $BB.
  BuildMI(BB, DL, TII->get(Mips::BPOSGE32)).addMBB(TBB);

  // Fill $FBB.
  unsigned VR2 = RegInfo.createVirtualRegister(RC);
  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), VR2)
    .addReg(Mips::ZERO).addImm(0);
  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);

  // Fill $TBB.
  unsigned VR1 = RegInfo.createVirtualRegister(RC);
  BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), VR1)
    .addReg(Mips::ZERO).addImm(1);

  // Insert phi function to $Sink.
  BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
          MI->getOperand(0).getReg())
    .addReg(VR2).addMBB(FBB).addReg(VR1).addMBB(TBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return Sink;
}

MachineBasicBlock * MipsSETargetLowering::
emitMSACBranchPseudo(MachineInstr *MI, MachineBasicBlock *BB,
                     unsigned BranchOp) const {
  // $bb:
  //  vany_nonzero $rd, $ws
  //  =>
  // $bb:
  //  bnz.b $ws, $tbb
  //  b $fbb
  // $fbb:
  //  li $rd1, 0
  //  b $sink
  // $tbb:
  //  li $rd2, 1
  // $sink:
  //  $rd = phi($rd1, $fbb, $rd2, $tbb)

  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  DebugLoc DL = MI->getDebugLoc();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *Sink = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, FBB);
  F->insert(It, TBB);
  F->insert(It, Sink);

  // Transfer the remainder of BB and its successor edges to Sink.
  Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
               BB->end());
  Sink->transferSuccessorsAndUpdatePHIs(BB);

  // Add successors.
  BB->addSuccessor(FBB);
  BB->addSuccessor(TBB);
  FBB->addSuccessor(Sink);
  TBB->addSuccessor(Sink);

  // Insert the real bnz.b instruction to $BB.
  BuildMI(BB, DL, TII->get(BranchOp))
    .addReg(MI->getOperand(1).getReg())
    .addMBB(TBB);

  // Fill $FBB.
  unsigned RD1 = RegInfo.createVirtualRegister(RC);
  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), RD1)
    .addReg(Mips::ZERO).addImm(0);
  BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);

  // Fill $TBB.
  unsigned RD2 = RegInfo.createVirtualRegister(RC);
  BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), RD2)
    .addReg(Mips::ZERO).addImm(1);

  // Insert phi function to $Sink.
  BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
          MI->getOperand(0).getReg())
    .addReg(RD1).addMBB(FBB).addReg(RD2).addMBB(TBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return Sink;
}
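
// In effect, a use of one of the MSA branch intrinsics (for example
// __builtin_msa_bnz_b, assuming the usual builtin-to-intrinsic mapping) ends
// up as a bnz.b over the small diamond built above: 0 is materialised in
// $fbb, 1 in $tbb, and the two are joined by a PHI in $sink, since MSA only
// provides branch forms of these vector tests rather than an instruction
// that writes the boolean to a GPR directly.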

// Emit the COPY_FW pseudo instruction.
//
// copy_fw_pseudo $fd, $ws, n
// =>
// copy_u_w $rt, $ws, $n
// mtc1 $rt, $fd
//
// When n is zero, the equivalent operation can be performed with (potentially)
// zero instructions due to register overlaps. This optimization is never valid
// for lane 1 because it would require FR=0 mode which isn't supported by MSA.
MachineBasicBlock * MipsSETargetLowering::
emitCOPY_FW(MachineInstr *MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Fd = MI->getOperand(0).getReg();
  unsigned Ws = MI->getOperand(1).getReg();
  unsigned Lane = MI->getOperand(2).getImm();

  if (Lane == 0)
    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_lo);
  else {
    unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);

    BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(1);
    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
  }

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

// Emit the COPY_FD pseudo instruction.
//
// copy_fd_pseudo $fd, $ws, n
// =>
// splati.d $wt, $ws, $n
// copy $fd, $wt:sub_64
//
// When n is zero, the equivalent operation can be performed with (potentially)
// zero instructions due to register overlaps. This optimization is always
// valid because FR=1 mode, the only mode supported by MSA, is in use.
MachineBasicBlock * MipsSETargetLowering::
emitCOPY_FD(MachineInstr *MI, MachineBasicBlock *BB) const {
  assert(Subtarget->isFP64bit());

  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  unsigned Fd = MI->getOperand(0).getReg();
  unsigned Ws = MI->getOperand(1).getReg();
  unsigned Lane = MI->getOperand(2).getImm() * 2;
  DebugLoc DL = MI->getDebugLoc();

  if (Lane == 0)
    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_64);
  else {
    unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);

    BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wt).addReg(Ws).addImm(1);
    BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_64);
  }

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}
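
// In both COPY_FW and COPY_FD the lane-0 case relies on the scalar FP
// register being the low part of the corresponding MSA register (the sub_lo
// and sub_64 subregister indices used above), so extracting lane 0 is just a
// subregister COPY that register allocation can usually fold away; only the
// other lanes need a splati.[wd] to move the element into position first.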

// Emit the INSERT_FW pseudo instruction.
//
// insert_fw_pseudo $wd, $wd_in, $n, $fs
// =>
// subreg_to_reg $wt:sub_lo, $fs
// insve_w $wd[$n], $wd_in, $wt[0]
MachineBasicBlock * MipsSETargetLowering::
emitINSERT_FW(MachineInstr *MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Wd = MI->getOperand(0).getReg();
  unsigned Wd_in = MI->getOperand(1).getReg();
  unsigned Lane = MI->getOperand(2).getImm();
  unsigned Fs = MI->getOperand(3).getReg();
  unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);

  BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
    .addImm(0).addReg(Fs).addImm(Mips::sub_lo);
  BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_W), Wd)
    .addReg(Wd_in).addImm(Lane).addReg(Wt);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

// Emit the INSERT_FD pseudo instruction.
//
// insert_fd_pseudo $wd, $wd_in, $n, $fs
// =>
// subreg_to_reg $wt:sub_64, $fs
// insve_d $wd[$n], $wd_in, $wt[0]
MachineBasicBlock * MipsSETargetLowering::
emitINSERT_FD(MachineInstr *MI, MachineBasicBlock *BB) const {
  assert(Subtarget->isFP64bit());

  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Wd = MI->getOperand(0).getReg();
  unsigned Wd_in = MI->getOperand(1).getReg();
  unsigned Lane = MI->getOperand(2).getImm();
  unsigned Fs = MI->getOperand(3).getReg();
  unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);

  BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
    .addImm(0).addReg(Fs).addImm(Mips::sub_64);
  BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_D), Wd)
    .addReg(Wd_in).addImm(Lane).addReg(Wt);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}