//===-- LegalizeVectorOps.cpp - Implement SelectionDAG::LegalizeVectors ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::LegalizeVectors method.
//
// The vector legalizer looks for vector operations which might need to be
// scalarized and legalizes them. This is a separate step from Legalize because
// scalarizing can introduce illegal types.  For example, suppose we have an
// ISD::SDIV of type v2i64 on x86-32.  The type is legal (for example, addition
// on a v2i64 is legal), but ISD::SDIV isn't legal, so we have to unroll the
// operation, which introduces nodes with the illegal type i64 which must be
// expanded.  Similarly, suppose we have an ISD::SRA of type v16i8 on PowerPC;
// the operation must be unrolled, which introduces nodes with the illegal
// type i8 which must be promoted.
//
// This does not legalize vector manipulations like ISD::BUILD_VECTOR,
// or operations that happen to take a vector which are custom-lowered;
// the legalization for such operations never produces nodes
// with illegal types, so it's okay to put off legalizing them until
// SelectionDAG::Legalize runs.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

namespace {
/// \brief Legalizes vector operations in a SelectionDAG by expanding,
/// promoting, or unrolling nodes whose opcode is not supported by the target
/// for the node's value type.
class VectorLegalizer {
  SelectionDAG& DAG;
  const TargetLowering &TLI;
  bool Changed; // Keep track of whether anything changed

  /// For nodes that are of legal width, and that have more than one use, this
  /// map indicates what regularized operand to use.  This allows us to avoid
  /// legalizing the same thing more than once.
  SmallDenseMap<SDValue, SDValue, 64> LegalizedNodes;

  /// \brief Adds a node to the translation cache.
  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return itself.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));
  }

  /// \brief Legalizes the given node.
  SDValue LegalizeOp(SDValue Op);

  /// \brief Assuming the node is legal, "legalize" the results.
  SDValue TranslateLegalizeResults(SDValue Op, SDValue Result);

  /// \brief Implements unrolling a VSETCC.
  SDValue UnrollVSETCC(SDValue Op);

  /// \brief Implement expand-based legalization of vector operations.
  ///
  /// This is just a high-level routine to dispatch to specific code paths for
  /// operations to legalize them.
  SDValue Expand(SDValue Op);

  /// \brief Implements expansion for UINT_TO_FLOAT; falls back to
  /// UnrollVectorOp if SINT_TO_FLOAT and SHR on vectors isn't legal.
  SDValue ExpandUINT_TO_FLOAT(SDValue Op);

  /// \brief Implement expansion for SIGN_EXTEND_INREG using SRL and SRA.
  SDValue ExpandSEXTINREG(SDValue Op);

  /// \brief Implement expansion for ANY_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and bitcasts to the
  /// proper type. The contents of the bits in the extended part of each
  /// element are undef.
  SDValue ExpandANY_EXTEND_VECTOR_INREG(SDValue Op);

  /// \brief Implement expansion for SIGN_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place, bitcasts to the proper
  /// type, then shifts left and arithmetic shifts right to introduce a sign
  /// extension.
  SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op);

  /// \brief Implement expansion for ZERO_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and blends zeros into
  /// the remaining lanes, finally bitcasting to the proper type.
  SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op);

  /// \brief Expand bswap of vectors into a shuffle if legal.
  SDValue ExpandBSWAP(SDValue Op);

  /// \brief Implement vselect in terms of XOR, AND, OR when blend is not
  /// supported by the target.
  SDValue ExpandVSELECT(SDValue Op);
  /// \brief Lower a SELECT with a scalar condition and vector operands by
  /// broadcasting the condition into a mask and blending with XOR/AND/OR.
  SDValue ExpandSELECT(SDValue Op);
  /// \brief Expand a vector load, handling non-byte-sized memory elements by
  /// loading wide words and re-assembling the elements.
  SDValue ExpandLoad(SDValue Op);
  /// \brief Expand a truncating vector store by scalarizing it.
  SDValue ExpandStore(SDValue Op);
  /// \brief Implements expansion for FNEG; falls back to UnrollVectorOp if
  /// FSUB isn't legal.
  SDValue ExpandFNEG(SDValue Op);
  /// \brief Expand BITREVERSE, preferring a byte-shuffle plus byte-vector
  /// BITREVERSE or vector bit operations over full scalarization.
  SDValue ExpandBITREVERSE(SDValue Op);
  /// \brief Expand CTLZ_ZERO_UNDEF / CTTZ_ZERO_UNDEF nodes.
  SDValue ExpandCTLZ_CTTZ_ZERO_UNDEF(SDValue Op);

  /// \brief Implements vector promotion.
  ///
  /// This is essentially just bitcasting the operands to a different type and
  /// bitcasting the result back to the original type.
  SDValue Promote(SDValue Op);

  /// \brief Implements [SU]INT_TO_FP vector promotion.
  ///
  /// This is a [zs]ext of the input operand to the next size up.
  SDValue PromoteINT_TO_FP(SDValue Op);

  /// \brief Implements FP_TO_[SU]INT vector promotion of the result type.
  ///
  /// It is promoted to the next size up integer type.  The result is then
  /// truncated back to the original type.
  SDValue PromoteFP_TO_INT(SDValue Op, bool isSigned);

public:
  /// \brief Begin legalizing the vector operations in the DAG.
  bool Run();
  VectorLegalizer(SelectionDAG& dag) :
      DAG(dag), TLI(dag.getTargetLoweringInfo()), Changed(false) {}
};

bool VectorLegalizer::Run() {
  // Before we start legalizing vector nodes, check if there are any vectors.
  bool HasVectors = false;
  // NOTE(review): E is captured as the last node currently in the DAG and the
  // loop compares against std::next(E) each iteration, which stops the walk
  // before any nodes appended to the list afterwards — presumably deliberate;
  // confirm before simplifying this to `I != DAG.allnodes_end()`.
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) {
    // Check if the values of the nodes contain vectors. We don't need to check
    // the operands because we are going to check their values at some point.
    // (Note: this inner E shadows the outer loop's E.)
    for (SDNode::value_iterator J = I->value_begin(), E = I->value_end();
         J != E; ++J)
      HasVectors |= J->isVector();

    // If we found a vector node we can start the legalization.
    if (HasVectors)
      break;
  }

  // If this basic block has no vectors then no need to legalize vectors.
  if (!HasVectors)
    return false;

  // The legalize process is inherently a bottom-up recursive process (users
  // legalize their uses before themselves). Given infinite stack space, we
  // could just start legalizing on the root and traverse the whole graph. In
  // practice however, this causes us to run out of stack space on large basic
  // blocks. To avoid this problem, compute an ordering of the nodes where each
  // node is only legalized after all of its operands are legalized.
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I)
    LegalizeOp(SDValue(&*I, 0));

  // Finally, it's possible the root changed.  Get the new root.
  SDValue OldRoot = DAG.getRoot();
  assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
  DAG.setRoot(LegalizedNodes[OldRoot]);

  LegalizedNodes.clear();

  // Remove dead nodes now.
  DAG.RemoveDeadNodes();

  return Changed;
}

SDValue VectorLegalizer::TranslateLegalizeResults(SDValue Op, SDValue Result) {
  // Generic legalization: just pass the operand through.
  for (unsigned i = 0, e = Op.getNode()->getNumValues(); i != e; ++i)
    AddLegalizedOperand(Op.getValue(i), Result.getValue(i));
  return Result.getValue(Op.getResNo());
}

SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  SDNode* Node = Op.getNode();

  // Legalize the operands
  SmallVector<SDValue, 8> Ops;
  for (const SDValue &Op : Node->op_values())
    Ops.push_back(LegalizeOp(Op));

  SDValue Result = SDValue(DAG.UpdateNodeOperands(Op.getNode(), Ops), 0);

  bool HasVectorValue = false;
  if (Op.getOpcode() == ISD::LOAD) {
    // Vector extending loads are legalized via the load-extension action
    // rather than the generic operation action below.
    LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (LD->getMemoryVT().isVector() && ExtType != ISD::NON_EXTLOAD)
      switch (TLI.getLoadExtAction(LD->getExtensionType(), LD->getValueType(0),
                                   LD->getMemoryVT())) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Result);
      case TargetLowering::Custom:
        if (SDValue Lowered = TLI.LowerOperation(Result, DAG)) {
          if (Lowered == Result)
            return TranslateLegalizeResults(Op, Lowered);
          Changed = true;
          if (Lowered->getNumValues() != Op->getNumValues()) {
            // This expanded to something other than the load. Assume the
            // lowering code took care of any chain values, and just handle the
            // returned value.
            assert(Result.getValue(1).use_empty() &&
                   "There are still live users of the old chain!");
            return LegalizeOp(Lowered);
          }
          return TranslateLegalizeResults(Op, Lowered);
        }
        // Deliberate fall through: if LowerOperation returned a null SDValue,
        // expand the load instead.
      case TargetLowering::Expand:
        Changed = true;
        return LegalizeOp(ExpandLoad(Op));
      }
  } else if (Op.getOpcode() == ISD::STORE) {
    // Vector truncating stores are legalized via the trunc-store action.
    StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
    EVT StVT = ST->getMemoryVT();
    MVT ValVT = ST->getValue().getSimpleValueType();
    if (StVT.isVector() && ST->isTruncatingStore())
      switch (TLI.getTruncStoreAction(ValVT, StVT)) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Result);
      case TargetLowering::Custom: {
        SDValue Lowered = TLI.LowerOperation(Result, DAG);
        Changed = Lowered != Result;
        return TranslateLegalizeResults(Op, Lowered);
      }
      case TargetLowering::Expand:
        Changed = true;
        return LegalizeOp(ExpandStore(Op));
      }
  } else if (Op.getOpcode() == ISD::MSCATTER || Op.getOpcode() == ISD::MSTORE)
    // Masked scatter/store produce no vector results, but still need vector
    // legalization, so force them through the switch below.
    HasVectorValue = true;

  for (SDNode::value_iterator J = Node->value_begin(), E = Node->value_end();
       J != E;
       ++J)
    HasVectorValue |= J->isVector();
  if (!HasVectorValue)
    return TranslateLegalizeResults(Op, Result);

  // Pick the value type to query the target's legalization action with; for
  // most opcodes this is the result type, but a few opcodes key off an
  // operand or memory type instead.
  EVT QueryType;
  switch (Op.getOpcode()) {
  default:
    return TranslateLegalizeResults(Op, Result);
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
  case ISD::SDIVREM:
  case ISD::UDIVREM:
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::BSWAP:
  case ISD::BITREVERSE:
  case ISD::CTLZ:
  case ISD::CTTZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTPOP:
  case ISD::SELECT:
  case ISD::VSELECT:
  case ISD::SELECT_CC:
  case ISD::SETCC:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::FNEG:
  case ISD::FABS:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNAN:
  case ISD::FMAXNAN:
  case ISD::FCOPYSIGN:
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FP_ROUND:
  case ISD::FP_EXTEND:
  case ISD::FMA:
  case ISD::SIGN_EXTEND_INREG:
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
    QueryType = Node->getValueType(0);
    break;
  case ISD::FP_ROUND_INREG:
    QueryType = cast<VTSDNode>(Node->getOperand(1))->getVT();
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    QueryType = Node->getOperand(0).getValueType();
    break;
  case ISD::MSCATTER:
    QueryType = cast<MaskedScatterSDNode>(Node)->getValue().getValueType();
    break;
  case ISD::MSTORE:
    QueryType = cast<MaskedStoreSDNode>(Node)->getValue().getValueType();
    break;
  }

  switch (TLI.getOperationAction(Node->getOpcode(), QueryType)) {
  default: llvm_unreachable("This action is not supported yet!");
  case TargetLowering::Promote:
    Result = Promote(Op);
    Changed = true;
    break;
  case TargetLowering::Legal:
    break;
  case TargetLowering::Custom: {
    if (SDValue Tmp1 = TLI.LowerOperation(Op, DAG)) {
      Result = Tmp1;
      break;
    }
    // FALL THROUGH: a null result from LowerOperation means "expand instead".
  }
  case TargetLowering::Expand:
    Result = Expand(Op);
  }

  // Make sure that the generated code is itself legal.
  if (Result != Op) {
    Result = LegalizeOp(Result);
    Changed = true;
  }

  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  AddLegalizedOperand(Op, Result);
  return Result;
}

SDValue VectorLegalizer::Promote(SDValue Op) {
  // For a few operations there is a specific concept for promotion based on
  // the operand's type.
  switch (Op.getOpcode()) {
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    // "Promote" the operation by extending the operand.
    return PromoteINT_TO_FP(Op);
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
    // Promote the operation by extending the operand.
    return PromoteFP_TO_INT(Op, Op->getOpcode() == ISD::FP_TO_SINT);
  }

  // There are currently two cases of vector promotion:
  // 1) Bitcasting a vector of integers to a different type to a vector of the
  //    same overall length. For example, x86 promotes ISD::AND v2i32 to v1i64.
  // 2) Extending a vector of floats to a vector of the same number of larger
  //    floats. For example, AArch64 promotes ISD::FADD on v4f16 to v4f32.
402 MVT VT = Op.getSimpleValueType(); 403 assert(Op.getNode()->getNumValues() == 1 && 404 "Can't promote a vector with multiple results!"); 405 MVT NVT = TLI.getTypeToPromoteTo(Op.getOpcode(), VT); 406 SDLoc dl(Op); 407 SmallVector<SDValue, 4> Operands(Op.getNumOperands()); 408 409 for (unsigned j = 0; j != Op.getNumOperands(); ++j) { 410 if (Op.getOperand(j).getValueType().isVector()) 411 if (Op.getOperand(j) 412 .getValueType() 413 .getVectorElementType() 414 .isFloatingPoint() && 415 NVT.isVector() && NVT.getVectorElementType().isFloatingPoint()) 416 Operands[j] = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Op.getOperand(j)); 417 else 418 Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Op.getOperand(j)); 419 else 420 Operands[j] = Op.getOperand(j); 421 } 422 423 Op = DAG.getNode(Op.getOpcode(), dl, NVT, Operands, Op.getNode()->getFlags()); 424 if ((VT.isFloatingPoint() && NVT.isFloatingPoint()) || 425 (VT.isVector() && VT.getVectorElementType().isFloatingPoint() && 426 NVT.isVector() && NVT.getVectorElementType().isFloatingPoint())) 427 return DAG.getNode(ISD::FP_ROUND, dl, VT, Op, DAG.getIntPtrConstant(0, dl)); 428 else 429 return DAG.getNode(ISD::BITCAST, dl, VT, Op); 430} 431 432SDValue VectorLegalizer::PromoteINT_TO_FP(SDValue Op) { 433 // INT_TO_FP operations may require the input operand be promoted even 434 // when the type is otherwise legal. 435 EVT VT = Op.getOperand(0).getValueType(); 436 assert(Op.getNode()->getNumValues() == 1 && 437 "Can't promote a vector with multiple results!"); 438 439 // Normal getTypeToPromoteTo() doesn't work here, as that will promote 440 // by widening the vector w/ the same element width and twice the number 441 // of elements. We want the other way around, the same number of elements, 442 // each twice the width. 443 // 444 // Increase the bitwidth of the element to the next pow-of-two 445 // (which is greater than 8 bits). 
446 447 EVT NVT = VT.widenIntegerVectorElementType(*DAG.getContext()); 448 assert(NVT.isSimple() && "Promoting to a non-simple vector type!"); 449 SDLoc dl(Op); 450 SmallVector<SDValue, 4> Operands(Op.getNumOperands()); 451 452 unsigned Opc = Op.getOpcode() == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : 453 ISD::SIGN_EXTEND; 454 for (unsigned j = 0; j != Op.getNumOperands(); ++j) { 455 if (Op.getOperand(j).getValueType().isVector()) 456 Operands[j] = DAG.getNode(Opc, dl, NVT, Op.getOperand(j)); 457 else 458 Operands[j] = Op.getOperand(j); 459 } 460 461 return DAG.getNode(Op.getOpcode(), dl, Op.getValueType(), Operands); 462} 463 464// For FP_TO_INT we promote the result type to a vector type with wider 465// elements and then truncate the result. This is different from the default 466// PromoteVector which uses bitcast to promote thus assumning that the 467// promoted vector type has the same overall size. 468SDValue VectorLegalizer::PromoteFP_TO_INT(SDValue Op, bool isSigned) { 469 assert(Op.getNode()->getNumValues() == 1 && 470 "Can't promote a vector with multiple results!"); 471 EVT VT = Op.getValueType(); 472 473 EVT NewVT; 474 unsigned NewOpc; 475 while (1) { 476 NewVT = VT.widenIntegerVectorElementType(*DAG.getContext()); 477 assert(NewVT.isSimple() && "Promoting to a non-simple vector type!"); 478 if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewVT)) { 479 NewOpc = ISD::FP_TO_SINT; 480 break; 481 } 482 if (!isSigned && TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewVT)) { 483 NewOpc = ISD::FP_TO_UINT; 484 break; 485 } 486 } 487 488 SDLoc loc(Op); 489 SDValue promoted = DAG.getNode(NewOpc, SDLoc(Op), NewVT, Op.getOperand(0)); 490 return DAG.getNode(ISD::TRUNCATE, SDLoc(Op), VT, promoted); 491} 492 493 494SDValue VectorLegalizer::ExpandLoad(SDValue Op) { 495 LoadSDNode *LD = cast<LoadSDNode>(Op.getNode()); 496 497 EVT SrcVT = LD->getMemoryVT(); 498 EVT SrcEltVT = SrcVT.getScalarType(); 499 unsigned NumElem = SrcVT.getVectorNumElements(); 500 501 502 SDValue 
NewChain; 503 SDValue Value; 504 if (SrcVT.getVectorNumElements() > 1 && !SrcEltVT.isByteSized()) { 505 SDLoc dl(Op); 506 507 SmallVector<SDValue, 8> Vals; 508 SmallVector<SDValue, 8> LoadChains; 509 510 EVT DstEltVT = LD->getValueType(0).getScalarType(); 511 SDValue Chain = LD->getChain(); 512 SDValue BasePTR = LD->getBasePtr(); 513 ISD::LoadExtType ExtType = LD->getExtensionType(); 514 515 // When elements in a vector is not byte-addressable, we cannot directly 516 // load each element by advancing pointer, which could only address bytes. 517 // Instead, we load all significant words, mask bits off, and concatenate 518 // them to form each element. Finally, they are extended to destination 519 // scalar type to build the destination vector. 520 EVT WideVT = TLI.getPointerTy(DAG.getDataLayout()); 521 522 assert(WideVT.isRound() && 523 "Could not handle the sophisticated case when the widest integer is" 524 " not power of 2."); 525 assert(WideVT.bitsGE(SrcEltVT) && 526 "Type is not legalized?"); 527 528 unsigned WideBytes = WideVT.getStoreSize(); 529 unsigned Offset = 0; 530 unsigned RemainingBytes = SrcVT.getStoreSize(); 531 SmallVector<SDValue, 8> LoadVals; 532 533 while (RemainingBytes > 0) { 534 SDValue ScalarLoad; 535 unsigned LoadBytes = WideBytes; 536 537 if (RemainingBytes >= LoadBytes) { 538 ScalarLoad = DAG.getLoad(WideVT, dl, Chain, BasePTR, 539 LD->getPointerInfo().getWithOffset(Offset), 540 LD->isVolatile(), LD->isNonTemporal(), 541 LD->isInvariant(), 542 MinAlign(LD->getAlignment(), Offset), 543 LD->getAAInfo()); 544 } else { 545 EVT LoadVT = WideVT; 546 while (RemainingBytes < LoadBytes) { 547 LoadBytes >>= 1; // Reduce the load size by half. 
548 LoadVT = EVT::getIntegerVT(*DAG.getContext(), LoadBytes << 3); 549 } 550 ScalarLoad = DAG.getExtLoad(ISD::EXTLOAD, dl, WideVT, Chain, BasePTR, 551 LD->getPointerInfo().getWithOffset(Offset), 552 LoadVT, LD->isVolatile(), 553 LD->isNonTemporal(), LD->isInvariant(), 554 MinAlign(LD->getAlignment(), Offset), 555 LD->getAAInfo()); 556 } 557 558 RemainingBytes -= LoadBytes; 559 Offset += LoadBytes; 560 BasePTR = DAG.getNode(ISD::ADD, dl, BasePTR.getValueType(), BasePTR, 561 DAG.getConstant(LoadBytes, dl, 562 BasePTR.getValueType())); 563 564 LoadVals.push_back(ScalarLoad.getValue(0)); 565 LoadChains.push_back(ScalarLoad.getValue(1)); 566 } 567 568 // Extract bits, pack and extend/trunc them into destination type. 569 unsigned SrcEltBits = SrcEltVT.getSizeInBits(); 570 SDValue SrcEltBitMask = DAG.getConstant((1U << SrcEltBits) - 1, dl, WideVT); 571 572 unsigned BitOffset = 0; 573 unsigned WideIdx = 0; 574 unsigned WideBits = WideVT.getSizeInBits(); 575 576 for (unsigned Idx = 0; Idx != NumElem; ++Idx) { 577 SDValue Lo, Hi, ShAmt; 578 579 if (BitOffset < WideBits) { 580 ShAmt = DAG.getConstant( 581 BitOffset, dl, TLI.getShiftAmountTy(WideVT, DAG.getDataLayout())); 582 Lo = DAG.getNode(ISD::SRL, dl, WideVT, LoadVals[WideIdx], ShAmt); 583 Lo = DAG.getNode(ISD::AND, dl, WideVT, Lo, SrcEltBitMask); 584 } 585 586 BitOffset += SrcEltBits; 587 if (BitOffset >= WideBits) { 588 WideIdx++; 589 BitOffset -= WideBits; 590 if (BitOffset > 0) { 591 ShAmt = DAG.getConstant( 592 SrcEltBits - BitOffset, dl, 593 TLI.getShiftAmountTy(WideVT, DAG.getDataLayout())); 594 Hi = DAG.getNode(ISD::SHL, dl, WideVT, LoadVals[WideIdx], ShAmt); 595 Hi = DAG.getNode(ISD::AND, dl, WideVT, Hi, SrcEltBitMask); 596 } 597 } 598 599 if (Hi.getNode()) 600 Lo = DAG.getNode(ISD::OR, dl, WideVT, Lo, Hi); 601 602 switch (ExtType) { 603 default: llvm_unreachable("Unknown extended-load op!"); 604 case ISD::EXTLOAD: 605 Lo = DAG.getAnyExtOrTrunc(Lo, dl, DstEltVT); 606 break; 607 case ISD::ZEXTLOAD: 608 Lo = 
DAG.getZExtOrTrunc(Lo, dl, DstEltVT); 609 break; 610 case ISD::SEXTLOAD: 611 ShAmt = 612 DAG.getConstant(WideBits - SrcEltBits, dl, 613 TLI.getShiftAmountTy(WideVT, DAG.getDataLayout())); 614 Lo = DAG.getNode(ISD::SHL, dl, WideVT, Lo, ShAmt); 615 Lo = DAG.getNode(ISD::SRA, dl, WideVT, Lo, ShAmt); 616 Lo = DAG.getSExtOrTrunc(Lo, dl, DstEltVT); 617 break; 618 } 619 Vals.push_back(Lo); 620 } 621 622 NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 623 Value = DAG.getNode(ISD::BUILD_VECTOR, dl, 624 Op.getNode()->getValueType(0), Vals); 625 } else { 626 SDValue Scalarized = TLI.scalarizeVectorLoad(LD, DAG); 627 628 NewChain = Scalarized.getValue(1); 629 Value = Scalarized.getValue(0); 630 } 631 632 AddLegalizedOperand(Op.getValue(0), Value); 633 AddLegalizedOperand(Op.getValue(1), NewChain); 634 635 return (Op.getResNo() ? NewChain : Value); 636} 637 638SDValue VectorLegalizer::ExpandStore(SDValue Op) { 639 StoreSDNode *ST = cast<StoreSDNode>(Op.getNode()); 640 641 EVT StVT = ST->getMemoryVT(); 642 EVT MemSclVT = StVT.getScalarType(); 643 unsigned ScalarSize = MemSclVT.getSizeInBits(); 644 645 // Round odd types to the next pow of two. 646 if (!isPowerOf2_32(ScalarSize)) { 647 // FIXME: This is completely broken and inconsistent with ExpandLoad 648 // handling. 649 650 // For sub-byte element sizes, this ends up with 0 stride between elements, 651 // so the same element just gets re-written to the same location. There seem 652 // to be tests explicitly testing for this broken behavior though. tests 653 // for this broken behavior. 

    LLVMContext &Ctx = *DAG.getContext();

    // Rebuild the store as a truncating store to the power-of-two-widened
    // memory type, then scalarize that instead.
    EVT NewMemVT
      = EVT::getVectorVT(Ctx,
                         MemSclVT.getIntegerVT(Ctx, NextPowerOf2(ScalarSize)),
                         StVT.getVectorNumElements());

    SDValue NewVectorStore
      = DAG.getTruncStore(ST->getChain(), SDLoc(Op), ST->getValue(),
                          ST->getBasePtr(),
                          ST->getPointerInfo(), NewMemVT,
                          ST->isVolatile(), ST->isNonTemporal(),
                          ST->getAlignment(),
                          ST->getAAInfo());
    ST = cast<StoreSDNode>(NewVectorStore.getNode());
  }

  SDValue TF = TLI.scalarizeVectorStore(ST, DAG);
  AddLegalizedOperand(Op, TF);
  return TF;
}

// Dispatch a vector operation that the target wants expanded to the matching
// specialized expansion; anything without one is fully unrolled.
SDValue VectorLegalizer::Expand(SDValue Op) {
  switch (Op->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    return ExpandSEXTINREG(Op);
  case ISD::ANY_EXTEND_VECTOR_INREG:
    return ExpandANY_EXTEND_VECTOR_INREG(Op);
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return ExpandSIGN_EXTEND_VECTOR_INREG(Op);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    return ExpandZERO_EXTEND_VECTOR_INREG(Op);
  case ISD::BSWAP:
    return ExpandBSWAP(Op);
  case ISD::VSELECT:
    return ExpandVSELECT(Op);
  case ISD::SELECT:
    return ExpandSELECT(Op);
  case ISD::UINT_TO_FP:
    return ExpandUINT_TO_FLOAT(Op);
  case ISD::FNEG:
    return ExpandFNEG(Op);
  case ISD::SETCC:
    return UnrollVSETCC(Op);
  case ISD::BITREVERSE:
    return ExpandBITREVERSE(Op);
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTTZ_ZERO_UNDEF:
    return ExpandCTLZ_CTTZ_ZERO_UNDEF(Op);
  default:
    return DAG.UnrollVectorOp(Op.getNode());
  }
}

SDValue VectorLegalizer::ExpandSELECT(SDValue Op) {
  // Lower a select instruction where the condition is a scalar and the
  // operands are vectors. Lower this select to VSELECT and implement it
  // using XOR AND OR. The selector bit is broadcasted.
  Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1);
  Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2);

  // result = (Op1 & Mask) | (Op2 & ~Mask) — Mask lanes are all-ones or
  // all-zeros, so this selects whole elements.
  SDValue AllOnes = DAG.getConstant(
      APInt::getAllOnesValue(BitTy.getSizeInBits()), DL, MaskTy);
  SDValue NotMask = DAG.getNode(ISD::XOR, DL, MaskTy, Mask, AllOnes);

  Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask);
  Op2 = DAG.getNode(ISD::AND, DL, MaskTy, Op2, NotMask);
  SDValue Val = DAG.getNode(ISD::OR, DL, MaskTy, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
}

SDValue VectorLegalizer::ExpandSEXTINREG(SDValue Op) {
  EVT VT = Op.getValueType();

  // Make sure that the SRA and SHL instructions are available.
  if (TLI.getOperationAction(ISD::SRA, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::SHL, VT) == TargetLowering::Expand)
    return DAG.UnrollVectorOp(Op.getNode());

  SDLoc DL(Op);
  EVT OrigTy = cast<VTSDNode>(Op->getOperand(1))->getVT();

  // Shift left to place the sign bit of the original narrow value at the top
  // of the element, then arithmetic-shift right to replicate it.
  unsigned BW = VT.getScalarType().getSizeInBits();
  unsigned OrigBW = OrigTy.getScalarType().getSizeInBits();
  SDValue ShiftSz = DAG.getConstant(BW - OrigBW, DL, VT);

  Op = Op.getOperand(0);
  Op = DAG.getNode(ISD::SHL, DL, VT, Op, ShiftSz);
  return DAG.getNode(ISD::SRA, DL, VT, Op, ShiftSz);
}

// Generically expand a vector anyext in register to a shuffle of the relevant
// lanes into the appropriate locations, with other lanes left undef.
SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  int NumElements = VT.getVectorNumElements();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  int NumSrcElements = SrcVT.getVectorNumElements();

  // Build a base mask of undef shuffles.
  SmallVector<int, 16> ShuffleMask;
  ShuffleMask.resize(NumSrcElements, -1);

  // Place the extended lanes into the correct locations.
  // On big-endian targets the low (significant) sub-lane of each widened
  // element sits at the high end, hence the endian offset.
  int ExtLaneScale = NumSrcElements / NumElements;
  int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
  for (int i = 0; i < NumElements; ++i)
    ShuffleMask[i * ExtLaneScale + EndianOffset] = i;

  return DAG.getNode(
      ISD::BITCAST, DL, VT,
      DAG.getVectorShuffle(SrcVT, DL, Src, DAG.getUNDEF(SrcVT), ShuffleMask));
}

SDValue VectorLegalizer::ExpandSIGN_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();

  // First build an any-extend node which can be legalized above when we
  // recurse through it.
  Op = DAG.getAnyExtendVectorInReg(Src, DL, VT);

  // Now we need sign extend. Do this by shifting the elements. Even if these
  // aren't legal operations, they have a better chance of being legalized
  // without full scalarization than the sign extension does.
  unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
  unsigned SrcEltWidth = SrcVT.getVectorElementType().getSizeInBits();
  SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
  return DAG.getNode(ISD::SRA, DL, VT,
                     DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
                     ShiftAmount);
}

// Generically expand a vector zext in register to a shuffle of the relevant
// lanes into the appropriate locations, a blend of zero into the high bits,
// and a bitcast to the wider element type.
SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDValue Op) {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  int NumElements = VT.getVectorNumElements();
  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  int NumSrcElements = SrcVT.getVectorNumElements();

  // Build up a zero vector to blend into this one.
845 SDValue Zero = DAG.getConstant(0, DL, SrcVT); 846 847 // Shuffle the incoming lanes into the correct position, and pull all other 848 // lanes from the zero vector. 849 SmallVector<int, 16> ShuffleMask; 850 ShuffleMask.reserve(NumSrcElements); 851 for (int i = 0; i < NumSrcElements; ++i) 852 ShuffleMask.push_back(i); 853 854 int ExtLaneScale = NumSrcElements / NumElements; 855 int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0; 856 for (int i = 0; i < NumElements; ++i) 857 ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i; 858 859 return DAG.getNode(ISD::BITCAST, DL, VT, 860 DAG.getVectorShuffle(SrcVT, DL, Zero, Src, ShuffleMask)); 861} 862 863static void createBSWAPShuffleMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) { 864 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8; 865 for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I) 866 for (int J = ScalarSizeInBytes - 1; J >= 0; --J) 867 ShuffleMask.push_back((I * ScalarSizeInBytes) + J); 868} 869 870SDValue VectorLegalizer::ExpandBSWAP(SDValue Op) { 871 EVT VT = Op.getValueType(); 872 873 // Generate a byte wise shuffle mask for the BSWAP. 874 SmallVector<int, 16> ShuffleMask; 875 createBSWAPShuffleMask(VT, ShuffleMask); 876 EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, ShuffleMask.size()); 877 878 // Only emit a shuffle if the mask is legal. 879 if (!TLI.isShuffleMaskLegal(ShuffleMask, ByteVT)) 880 return DAG.UnrollVectorOp(Op.getNode()); 881 882 SDLoc DL(Op); 883 Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Op.getOperand(0)); 884 Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT), ShuffleMask); 885 return DAG.getNode(ISD::BITCAST, DL, VT, Op); 886} 887 888SDValue VectorLegalizer::ExpandBITREVERSE(SDValue Op) { 889 EVT VT = Op.getValueType(); 890 891 // If we have the scalar operation, it's probably cheaper to unroll it. 
  if (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, VT.getScalarType()))
    return DAG.UnrollVectorOp(Op.getNode());

  // If the vector element width is a whole number of bytes, test if its legal
  // to BSWAP shuffle the bytes and then perform the BITREVERSE on the byte
  // vector. This greatly reduces the number of bit shifts necessary.
  unsigned ScalarSizeInBits = VT.getScalarSizeInBits();
  if (ScalarSizeInBits > 8 && (ScalarSizeInBits % 8) == 0) {
    SmallVector<int, 16> BSWAPMask;
    createBSWAPShuffleMask(VT, BSWAPMask);

    EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, BSWAPMask.size());
    // Only worthwhile if the byte-vector BITREVERSE is itself supported, or
    // can be expanded with the byte-vector shift/mask operations below.
    if (TLI.isShuffleMaskLegal(BSWAPMask, ByteVT) &&
        (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, ByteVT) ||
         (TLI.isOperationLegalOrCustom(ISD::SHL, ByteVT) &&
          TLI.isOperationLegalOrCustom(ISD::SRL, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::AND, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::OR, ByteVT)))) {
      SDLoc DL(Op);
      // Reverse byte order across each element first, then reverse the bits
      // within each byte.
      Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Op.getOperand(0));
      Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT),
                                BSWAPMask);
      Op = DAG.getNode(ISD::BITREVERSE, DL, ByteVT, Op);
      return DAG.getNode(ISD::BITCAST, DL, VT, Op);
    }
  }

  // If we have the appropriate vector bit operations, it is better to use them
  // than unrolling and expanding each component.
  if (!TLI.isOperationLegalOrCustom(ISD::SHL, VT) ||
      !TLI.isOperationLegalOrCustom(ISD::SRL, VT) ||
      !TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT) ||
      !TLI.isOperationLegalOrCustomOrPromote(ISD::OR, VT))
    return DAG.UnrollVectorOp(Op.getNode());

  // Let LegalizeDAG handle this later.
  return Op;
}

SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
  // Implement VSELECT in terms of XOR, AND, OR
  // on platforms which do not support blend natively.
  SDLoc DL(Op);

  SDValue Mask = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);

  // All of the bitwise work below happens in the mask's type.
  EVT VT = Mask.getValueType();

  // If we can't even use the basic vector operations of
  // AND,OR,XOR, we will have to scalarize the op.
  // Notice that the operation may be 'promoted' which means that it is
  // 'bitcasted' to another type which is handled.
  // This operation also isn't safe with AND, OR, XOR when the boolean
  // type is 0/1 as we need an all ones vector constant to mask with.
  // FIXME: Sign extend 1 to all ones if thats legal on the target.
  if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
      TLI.getBooleanContents(Op1.getValueType()) !=
          TargetLowering::ZeroOrNegativeOneBooleanContent)
    return DAG.UnrollVectorOp(Op.getNode());

  // If the mask and the type are different sizes, unroll the vector op. This
  // can occur when getSetCCResultType returns something that is different in
  // size from the operand types. For example, v4i8 = select v4i32, v4i8, v4i8.
  if (VT.getSizeInBits() != Op1.getValueType().getSizeInBits())
    return DAG.UnrollVectorOp(Op.getNode());

  // Bitcast the operands to be the same type as the mask.
  // This is needed when we select between FP types because
  // the mask is a vector of integers.
965 Op1 = DAG.getNode(ISD::BITCAST, DL, VT, Op1); 966 Op2 = DAG.getNode(ISD::BITCAST, DL, VT, Op2); 967 968 SDValue AllOnes = DAG.getConstant( 969 APInt::getAllOnesValue(VT.getScalarType().getSizeInBits()), DL, VT); 970 SDValue NotMask = DAG.getNode(ISD::XOR, DL, VT, Mask, AllOnes); 971 972 Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask); 973 Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask); 974 SDValue Val = DAG.getNode(ISD::OR, DL, VT, Op1, Op2); 975 return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val); 976} 977 978SDValue VectorLegalizer::ExpandUINT_TO_FLOAT(SDValue Op) { 979 EVT VT = Op.getOperand(0).getValueType(); 980 SDLoc DL(Op); 981 982 // Make sure that the SINT_TO_FP and SRL instructions are available. 983 if (TLI.getOperationAction(ISD::SINT_TO_FP, VT) == TargetLowering::Expand || 984 TLI.getOperationAction(ISD::SRL, VT) == TargetLowering::Expand) 985 return DAG.UnrollVectorOp(Op.getNode()); 986 987 EVT SVT = VT.getScalarType(); 988 assert((SVT.getSizeInBits() == 64 || SVT.getSizeInBits() == 32) && 989 "Elements in vector-UINT_TO_FP must be 32 or 64 bits wide"); 990 991 unsigned BW = SVT.getSizeInBits(); 992 SDValue HalfWord = DAG.getConstant(BW/2, DL, VT); 993 994 // Constants to clear the upper part of the word. 995 // Notice that we can also use SHL+SHR, but using a constant is slightly 996 // faster on x86. 997 uint64_t HWMask = (SVT.getSizeInBits()==64)?0x00000000FFFFFFFF:0x0000FFFF; 998 SDValue HalfWordMask = DAG.getConstant(HWMask, DL, VT); 999 1000 // Two to the power of half-word-size. 1001 SDValue TWOHW = DAG.getConstantFP(1 << (BW/2), DL, Op.getValueType()); 1002 1003 // Clear upper part of LO, lower HI 1004 SDValue HI = DAG.getNode(ISD::SRL, DL, VT, Op.getOperand(0), HalfWord); 1005 SDValue LO = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), HalfWordMask); 1006 1007 // Convert hi and lo to floats 1008 // Convert the hi part back to the upper values 1009 // TODO: Can any fast-math-flags be set on these nodes? 
  // Convert both halves with the (available) signed conversion; both values
  // are non-negative after the masking/shifting above, so this is exact.
  SDValue fHI = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), HI);
  // Scale the high half back up by 2^(BW/2).
  fHI = DAG.getNode(ISD::FMUL, DL, Op.getValueType(), fHI, TWOHW);
  SDValue fLO = DAG.getNode(ISD::SINT_TO_FP, DL, Op.getValueType(), LO);

  // Add the two halves
  return DAG.getNode(ISD::FADD, DL, Op.getValueType(), fHI, fLO);
}


SDValue VectorLegalizer::ExpandFNEG(SDValue Op) {
  // Expand FNEG as (-0.0 - x) when vector FSUB is available; subtracting
  // from -0.0 (not +0.0) keeps FNEG(+0.0) == -0.0 under IEEE semantics.
  if (TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType())) {
    SDLoc DL(Op);
    SDValue Zero = DAG.getConstantFP(-0.0, DL, Op.getValueType());
    // TODO: If FNEG had fast-math-flags, they'd get propagated to this FSUB.
    return DAG.getNode(ISD::FSUB, DL, Op.getValueType(),
                       Zero, Op.getOperand(0));
  }
  // No vector FSUB: fall back to per-element expansion.
  return DAG.UnrollVectorOp(Op.getNode());
}

SDValue VectorLegalizer::ExpandCTLZ_CTTZ_ZERO_UNDEF(SDValue Op) {
  // If the non-ZERO_UNDEF version is supported we can use that instead.
  // (The _ZERO_UNDEF forms permit any result for a zero input, so the
  // fully-defined count is always a valid replacement.)
  unsigned Opc = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF ? ISD::CTLZ : ISD::CTTZ;
  if (TLI.isOperationLegalOrCustom(Opc, Op.getValueType())) {
    SDLoc DL(Op);
    return DAG.getNode(Opc, DL, Op.getValueType(), Op.getOperand(0));
  }

  // Otherwise go ahead and unroll.
  return DAG.UnrollVectorOp(Op.getNode());
}

/// Scalarize a VSETCC: compare each pair of elements individually and splat
/// each boolean result as all-ones / all-zeros into the matching element of
/// the result vector.
SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
  EVT VT = Op.getValueType();
  unsigned NumElems = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1), CC = Op.getOperand(2);
  // Element type of the compare operands (may differ from the result's).
  EVT TmpEltVT = LHS.getValueType().getVectorElementType();
  SDLoc dl(Op);
  SmallVector<SDValue, 8> Ops(NumElems);
  for (unsigned i = 0; i < NumElems; ++i) {
    // Extract element i of each operand.
    SDValue LHSElem = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
        DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
    SDValue RHSElem = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
        DAG.getConstant(i, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
    // Scalar compare with condition code CC...
    Ops[i] = DAG.getNode(ISD::SETCC, dl,
                         TLI.getSetCCResultType(DAG.getDataLayout(),
                                                *DAG.getContext(), TmpEltVT),
                         LHSElem, RHSElem, CC);
    // ...then widen the scalar boolean to an all-ones / all-zeros element.
    Ops[i] = DAG.getSelect(dl, EltVT, Ops[i],
                           DAG.getConstant(APInt::getAllOnesValue
                                           (EltVT.getSizeInBits()), dl, EltVT),
                           DAG.getConstant(0, dl, EltVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}

} // end anonymous namespace

bool SelectionDAG::LegalizeVectors() {
  return VectorLegalizer(*this).Run();
}