SLPVectorizer.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <map>

using namespace llvm;

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

namespace {

static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;
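// Illustrative note (not from the original source): with a 128-bit minimum
// vector register size, the narrowest full-width bundles are, e.g., 16 x i8,
// 4 x i32, 4 x float, or 2 x double.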
/// A helper class for numbering instructions in multiple blocks.
/// Numbers start at zero for each basic block.
struct BlockNumbering {

  BlockNumbering(BasicBlock *Bb) : BB(Bb), Valid(false) {}

  BlockNumbering() : BB(0), Valid(false) {}

  void numberInstructions() {
    unsigned Loc = 0;
    InstrIdx.clear();
    InstrVec.clear();
    // Number the instructions in the block.
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      InstrIdx[it] = Loc++;
      InstrVec.push_back(it);
      assert(InstrVec[InstrIdx[it]] == it && "Invalid allocation");
    }
    Valid = true;
  }

  int getIndex(Instruction *I) {
    assert(I->getParent() == BB && "Invalid instruction");
    if (!Valid)
      numberInstructions();
    assert(InstrIdx.count(I) && "Unknown instruction");
    return InstrIdx[I];
  }

  Instruction *getInstruction(unsigned loc) {
    if (!Valid)
      numberInstructions();
    assert(InstrVec.size() > loc && "Invalid Index");
    return InstrVec[loc];
  }

  void forget() { Valid = false; }

private:
  /// The block we are numbering.
  BasicBlock *BB;
  /// Is the block numbered.
  bool Valid;
  /// Maps instructions to numbers and back.
  SmallDenseMap<Instruction *, int> InstrIdx;
  /// Maps integers to Instructions.
  SmallVector<Instruction *, 32> InstrVec;
};

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return 0;

    if (BB != I->getParent())
      return 0;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode())
      return 0;
  }
  return Opcode;
}

/// \returns \p I after propagating metadata from \p VL.
static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
    unsigned Kind = Metadata[i].first;
    MDNode *MD = Metadata[i].second;

    for (int i = 1, e = VL.size(); MD && i != e; i++) {
      Instruction *I = cast<Instruction>(VL[i]);
      MDNode *IMD = I->getMetadata(Kind);

      switch (Kind) {
      default:
        MD = 0; // Remove unknown metadata
        break;
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      }
    }
    I->setMetadata(Kind, MD);
  }
  return I;
}
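// Illustrative behaviour of the predicates above, for hypothetical bundles:
//   VL = {add, add, add, add} -> getSameOpcode returns Instruction::Add
//   VL = {add, mul, add, add} -> getSameOpcode returns 0 (bundle is gathered)
//   VL = {%x, %x, %x, %x}     -> isSplat is true (lowerable as a broadcast)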
/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type* getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return 0;

  return Ty;
}

/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}
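/// \brief Reorder the operands of a list of commutative binary operations so
/// that each side is more likely to be cheap to vectorize: either all values
/// on one side share a single opcode, or a side becomes a splat that can be
/// lowered as a broadcast. A hypothetical example:
///   %r0 = fadd %a, %p
///   %r1 = fadd %q, %a
/// Swapping the operands of %r1 gives Left = {%a, %a} (a broadcast) and
/// Right = {%p, %q}.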
static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                           SmallVectorImpl<Value *> &Left,
                                           SmallVectorImpl<Value *> &Right) {

  SmallVector<Value *, 16> OrigLeft, OrigRight;

  bool AllSameOpcodeLeft = true;
  bool AllSameOpcodeRight = true;
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);

    OrigLeft.push_back(V0);
    OrigRight.push_back(V1);

    Instruction *I0 = dyn_cast<Instruction>(V0);
    Instruction *I1 = dyn_cast<Instruction>(V1);

    // Check whether all operands on one side have the same opcode. In this
    // case we want to preserve the original order and not make things worse
    // by reordering.
    AllSameOpcodeLeft = I0;
    AllSameOpcodeRight = I1;

    if (i && AllSameOpcodeLeft) {
      if (Instruction *P0 = dyn_cast<Instruction>(OrigLeft[i-1])) {
        if (P0->getOpcode() != I0->getOpcode())
          AllSameOpcodeLeft = false;
      } else
        AllSameOpcodeLeft = false;
    }
    if (i && AllSameOpcodeRight) {
      if (Instruction *P1 = dyn_cast<Instruction>(OrigRight[i-1])) {
        if (P1->getOpcode() != I1->getOpcode())
          AllSameOpcodeRight = false;
      } else
        AllSameOpcodeRight = false;
    }

    // Sort two opcodes. In the code below we try to preserve the ability to
    // use a broadcast of values instead of individual inserts.
    // vl1 = load
    // vl2 = phi
    // vr1 = load
    // vr2 = vr1
    //  = vl1 x vr1
    //  = vl2 x vr2
    // If we just sorted according to opcode we would leave the first line
    // intact but we would swap vl2 with vr2 because opcode(phi) > opcode(load).
    //  = vl1 x vr1
    //  = vr2 x vl2
    // Because vr2 and vr1 are from the same load we lose the opportunity of a
    // broadcast for the packed right side in the backend: we have [vr1, vl2]
    // instead of [vr1, vr2=vr1].
    if (I0 && I1) {
      if (!i && I0->getOpcode() > I1->getOpcode()) {
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() > I1->getOpcode() && Right[i-1] != I1) {
        // Try not to destroy a broadcast for no apparent benefit.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Right[i-1] == I0) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Left[i-1] == I1) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else {
        Left.push_back(I0);
        Right.push_back(I1);
      }
      continue;
    }
    // One opcode, put the instruction on the right.
    if (I0) {
      Left.push_back(V1);
      Right.push_back(I0);
      continue;
    }
    Left.push_back(V0);
    Right.push_back(V1);
  }

  bool LeftBroadcast = isSplat(Left);
  bool RightBroadcast = isSplat(Right);

  // Don't reorder if the operands were good to begin with.
  if (!(LeftBroadcast || RightBroadcast) &&
      (AllSameOpcodeRight || AllSameOpcodeLeft)) {
    Left = OrigLeft;
    Right = OrigRight;
  }
}

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, const DataLayout *Dl,
          TargetTransformInfo *Tti, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt) :
    F(Func), SE(Se), DL(Dl), TTI(Tti), AA(Aa), LI(Li), DT(Dt),
    Builder(Se->getContext()) {
    // Setup the block numbering utility for all of the blocks in the
    // function.
    for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
      BasicBlock *BB = it;
      BlocksNumbers[BB] = BlockNumbering(BB);
    }
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots and is possibly
  /// used by a reduction of \p RdxOps.
  void buildTree(ArrayRef<Value *> Roots, ValueSet *RdxOps = 0);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    RdxOps = 0;
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    MemBarrierIgnoreList.clear();
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();
private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \returns the AA location that is being accessed by the instruction.
  AliasAnalysis::Location getLocation(Instruction *I);

  /// \brief Checks if it is possible to sink an instruction from
  /// \p Src to \p Dst.
  /// \returns the pointer to the barrier instruction if we can't sink.
  Value *getSinkBarrier(Instruction *Src, Instruction *Dst);

  /// \returns the index of the last instruction in the BB from \p VL.
  int getLastIndex(ArrayRef<Value *> VL);

  /// \returns the last instruction in the bundle \p VL.
  Instruction *getLastInstruction(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(0), LastScalarIndex(0),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// The index in the basic block of the last scalar.
    int LastScalarIndex;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.push_back(TreeEntry());
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      Last->LastScalarIndex = getLastIndex(VL);
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      Last->LastScalarIndex = 0;
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L) :
      Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // Which user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// A list of instructions to ignore while sinking
  /// memory instructions. This list must be reset between runs of getCost.
  ValueSet MemBarrierIgnoreList;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Numbers instructions in different blocks.
  DenseMap<BasicBlock *, BlockNumbering> BlocksNumbers;

  /// Reduction operators.
  ValueSet *RdxOps;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  const DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ValueSet *Rdx) {
  deleteTree();
  RdxOps = Rdx;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(U)) {
          DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
                *U << ".\n");
          int Idx = ScalarToTreeEntry[U]; (void) Idx;
          assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
          continue;
        }
        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Ignore uses that are part of the reduction.
        if (Rdx && std::find(Rdx->begin(), Rdx->end(), UserInst) != Rdx->end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}


void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }

  // If all of the operands are identical or constant we have a simple solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) ||
      !getSameOpcode(VL)) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars appears in the table OR it is marked as a value that
  // needs to stay scalar then we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i]) || MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar. \n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  int MyLastIndex = getLastIndex(VL);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *Scalar = cast<Instruction>(VL[i]);
    DEBUG(dbgs() << "SLP: Checking users of " << *Scalar << ". \n");
    for (User *U : Scalar->users()) {
      DEBUG(dbgs() << "SLP: \tUser " << *U << ". \n");
      Instruction *UI = dyn_cast<Instruction>(U);
      if (!UI) {
        DEBUG(dbgs() << "SLP: Gathering due to unknown user. \n");
        newTreeEntry(VL, false);
        return;
      }

      // We don't care if the user is in a different basic block.
      BasicBlock *UserBlock = UI->getParent();
      if (UserBlock != BB) {
        DEBUG(dbgs() << "SLP: User from a different basic block "
              << *UI << ". \n");
        continue;
      }

      // If this is a PHINode within this basic block then we can place the
      // extract wherever we want.
      if (isa<PHINode>(*UI)) {
        DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *UI << ". \n");
        continue;
      }

      // Check if this is a safe in-tree user.
      if (ScalarToTreeEntry.count(UI)) {
        int Idx = ScalarToTreeEntry[UI];
        int VecLocation = VectorizableTree[Idx].LastScalarIndex;
        if (VecLocation <= MyLastIndex) {
          DEBUG(dbgs() << "SLP: Gathering due to unschedulable vector. \n");
\n"); 702 newTreeEntry(VL, false); 703 return; 704 } 705 DEBUG(dbgs() << "SLP: In-tree user (" << *UI << ") at #" << 706 VecLocation << " vector value (" << *Scalar << ") at #" 707 << MyLastIndex << ".\n"); 708 continue; 709 } 710 711 // This user is part of the reduction. 712 if (RdxOps && RdxOps->count(UI)) 713 continue; 714 715 // Make sure that we can schedule this unknown user. 716 BlockNumbering &BN = BlocksNumbers[BB]; 717 int UserIndex = BN.getIndex(UI); 718 if (UserIndex < MyLastIndex) { 719 720 DEBUG(dbgs() << "SLP: Can't schedule extractelement for " 721 << *UI << ". \n"); 722 newTreeEntry(VL, false); 723 return; 724 } 725 } 726 } 727 728 // Check that every instructions appears once in this bundle. 729 for (unsigned i = 0, e = VL.size(); i < e; ++i) 730 for (unsigned j = i+1; j < e; ++j) 731 if (VL[i] == VL[j]) { 732 DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 733 newTreeEntry(VL, false); 734 return; 735 } 736 737 // Check that instructions in this bundle don't reference other instructions. 738 // The runtime of this check is O(N * N-1 * uses(N)) and a typical N is 4. 739 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 740 for (User *U : VL[i]->users()) { 741 for (unsigned j = 0; j < e; ++j) { 742 if (i != j && U == VL[j]) { 743 DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << *U << ". \n"); 744 newTreeEntry(VL, false); 745 return; 746 } 747 } 748 } 749 } 750 751 DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 752 753 unsigned Opcode = getSameOpcode(VL); 754 755 // Check if it is safe to sink the loads or the stores. 756 if (Opcode == Instruction::Load || Opcode == Instruction::Store) { 757 Instruction *Last = getLastInstruction(VL); 758 759 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 760 if (VL[i] == Last) 761 continue; 762 Value *Barrier = getSinkBarrier(cast<Instruction>(VL[i]), Last); 763 if (Barrier) { 764 DEBUG(dbgs() << "SLP: Can't sink " << *VL[i] << "\n down to " << *Last 765 << "\n because of " << *Barrier << ". Gathering.\n"); 766 newTreeEntry(VL, false); 767 return; 768 } 769 } 770 } 771 772 switch (Opcode) { 773 case Instruction::PHI: { 774 PHINode *PH = dyn_cast<PHINode>(VL0); 775 776 // Check for terminator values (e.g. invoke). 777 for (unsigned j = 0; j < VL.size(); ++j) 778 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 779 TerminatorInst *Term = dyn_cast<TerminatorInst>( 780 cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i))); 781 if (Term) { 782 DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n"); 783 newTreeEntry(VL, false); 784 return; 785 } 786 } 787 788 newTreeEntry(VL, true); 789 DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 790 791 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 792 ValueList Operands; 793 // Prepare the operand vector. 794 for (unsigned j = 0; j < VL.size(); ++j) 795 Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock( 796 PH->getIncomingBlock(i))); 797 798 buildTree_rec(Operands, Depth + 1); 799 } 800 return; 801 } 802 case Instruction::ExtractElement: { 803 bool Reuse = CanReuseExtract(VL); 804 if (Reuse) { 805 DEBUG(dbgs() << "SLP: Reusing extract sequence.\n"); 806 } 807 newTreeEntry(VL, Reuse); 808 return; 809 } 810 case Instruction::Load: { 811 // Check if the loads are consecutive or of we need to swizzle them. 
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple() || !isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of loads.\n");
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    // We can ignore these values because we are sinking them down.
    MemBarrierIgnoreList.insert(VL.begin(), VL.end());
    buildTree_rec(Operands, Depth + 1);
    return;
  }
  case Instruction::Call: {
    // Check if the calls are all to the same vectorizable intrinsic.
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(VL[0]);
    if (II == NULL) {
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
      return;
    }

    Function *Int = II->getCalledFunction();

    for (unsigned i = 1, e = VL.size(); i != e; ++i) {
      IntrinsicInst *II2 = dyn_cast<IntrinsicInst>(VL[i]);
      if (!II2 || II2->getCalledFunction() != Int) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: mismatched calls:" << *II << "!=" << *VL[i]
                     << "\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    for (unsigned i = 0, e = II->getNumArgOperands(); i != e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j) {
        IntrinsicInst *II2 = dyn_cast<IntrinsicInst>(VL[j]);
        Operands.push_back(II2->getArgOperand(i));
      }
      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  default:
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }

  assert(getSameOpcode(VL) && getSameType(VL) && getSameBlock(VL) &&
         "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  unsigned Opcode = VL0->getOpcode();
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractElement: {
    if (CanReuseExtract(VL)) {
      int DeadCost = 0;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
        if (E->hasOneUse())
          // Take credit for instruction that will become dead.
          DeadCost +=
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
      }
      return -DeadCost;
    }
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost = VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt = NULL;
      for (unsigned i = 0; i < VL.size(); ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        if (!isa<ConstantInt>(I->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }
        if (i == 0) {
          CInt = cast<ConstantInt>(I->getOperand(1));
          continue;
        }
        if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
            CInt != cast<ConstantInt>(I->getOperand(1)))
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      }

      ScalarCost =
          VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK);
    }
    return VecCost - ScalarCost;
  }
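  // The memory cases below follow the same scheme as above: the entry cost
  // is the cost of one wide access minus the cost of the scalar accesses it
  // replaces (e.g. one <4 x float> load versus four float loads), so a
  // negative value means the vector form is expected to be cheaper.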
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
          TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    int ScalarStCost = VecTy->getNumElements() *
          TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    IntrinsicInst *II = cast<IntrinsicInst>(CI);
    Intrinsic::ID ID = II->getIntrinsicID();

    // Calculate the cost of the scalar and vector calls.
    SmallVector<Type*, 4> ScalarTys, VecTys;
    for (unsigned op = 0, opc = II->getNumArgOperands(); op != opc; ++op) {
      ScalarTys.push_back(CI->getArgOperand(op)->getType());
      VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
                                       VecTy->getNumElements()));
    }

    int ScalarCallCost = VecTy->getNumElements() *
        TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys);

    int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys);

    DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
          << " (" << VecCallCost << "-" << ScalarCallCost << ")"
          << " for " << *II << "\n");

    return VecCallCost - ScalarCallCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
        VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of height 2.
  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat stores.
  if (!VectorizableTree[0].NeedToGather && isSplat(VectorizableTree[1].Scalars))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}
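// The total tree cost computed below is the sum of the per-entry costs plus
// the cost of extracting every in-tree scalar that still has users outside
// the tree. As noted on getTreeCost's declaration, a negative total means
// vectorization is expected to be profitable.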
int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
        VectorizableTree.size() << ".\n");

  // We only vectorize tiny trees if they are fully vectorizable.
  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
    if (!VectorizableTree.size()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return INT_MAX;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
    int C = getEntryCost(&VectorizableTree[i]);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
          << *VectorizableTree[i].Scalars[0] << ".\n");
    Cost += C;
  }

  SmallSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
       I != E; ++I) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(I->Scalar))
      continue;

    VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
    ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                           I->Lane);
  }

  DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
  return Cost + ExtractCost;
}

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}

AliasAnalysis::Location BoUpSLP::getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return AA->getLocation(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return AA->getLocation(LI);
  return AliasAnalysis::Location();
}

Value *BoUpSLP::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return 0;
}

unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}
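// A hypothetical example for the consecutiveness check below: for A = &p[0]
// and B = &p[1] with 4-byte elements, Size is 4 and, after stripping
// constant offsets, OffsetB - OffsetA == 4, so the accesses are consecutive.
// When the constant-offset walk is inconclusive, ScalarEvolution is asked
// whether PtrB == PtrA + Size instead.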
bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same type.
  if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL->getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL->getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
  return X == PtrSCEVB;
}

Value *BoUpSLP::getSinkBarrier(Instruction *Src, Instruction *Dst) {
  assert(Src->getParent() == Dst->getParent() && "Not the same BB");
  BasicBlock::iterator I = Src, E = Dst;
  /// Scan all of the instructions from SRC to DST and check if
  /// the source may alias.
  for (++I; I != E; ++I) {
    // Ignore store instructions that are marked as 'ignore'.
    if (MemBarrierIgnoreList.count(I))
      continue;
    if (Src->mayWriteToMemory()) /* Write */ {
      if (!I->mayReadOrWriteMemory())
        continue;
    } else /* Read */ {
      if (!I->mayWriteToMemory())
        continue;
    }
    AliasAnalysis::Location A = getLocation(&*I);
    AliasAnalysis::Location B = getLocation(Src);

    if (!A.Ptr || !B.Ptr || AA->alias(A, B))
      return I;
  }
  return 0;
}

int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(BB->getFirstNonPHI());
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  return MaxIdx;
}

Instruction *BoUpSLP::getLastInstruction(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(cast<Instruction>(VL[0]));
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  Instruction *I = BN.getInstruction(MaxIdx);
  assert(I && "bad location");
  return I;
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  Instruction *LastInst = getLastInstruction(VL);
  BasicBlock::iterator NextInst = LastInst;
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}
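// Gather() below emits a chain of insertelement instructions, e.g.
// (illustrative IR) for a four-wide bundle {%a, %b, %c, %d}:
//   %v0 = insertelement <4 x i32> undef, i32 %a, i32 0
//   %v1 = insertelement <4 x i32> %v0, i32 %b, i32 1
//   %v2 = insertelement <4 x i32> %v1, i32 %c, i32 2
//   %v3 = insertelement <4 x i32> %v2, i32 %d, i32 3
// Each instruction is recorded in GatherSeq and CSEBlocks so that
// optimizeGatherSequence() can later hoist and CSE these chains.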
Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instruction.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
  SmallDenseMap<Value*, int>::const_iterator Entry
    = ScalarToTreeEntry.find(VL[0]);
  if (Entry != ScalarToTreeEntry.end()) {
    int Idx = Entry->second;
    const TreeEntry *En = &VectorizableTree[Idx];
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return 0;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    if (E->isSame(VL))
      return vectorizeTree(E);
  }

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    return Gather(E->Scalars, VecTy);
  }

  unsigned Opcode = VL0->getOpcode();
  assert(Opcode == getSameOpcode(E->Scalars) && "Invalid opcode");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallSet<BasicBlock*, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB)) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
      for (unsigned j = 0; j < E->Scalars.size(); ++j)
        Operands.push_back(cast<PHINode>(E->Scalars[j])->
                           getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (CanReuseExtract(E->Scalars)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    return Gather(E->Scalars, VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));

    setInsertPointAfterBundle(E->Scalars);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CastInst *CI = dyn_cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    ValueList LHSVL, RHSVL;
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
      reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
    else
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
        RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      }

    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (LHS == RHS && isa<Instruction>(LHS)) {
      assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
    }

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
    Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
    E->VectorizedValue = V;

    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    setInsertPointAfterBundle(E->Scalars);

    LoadInst *LI = cast<LoadInst>(VL0);
    unsigned AS = LI->getPointerAddressSpace();

    Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    unsigned Alignment = LI->getAlignment();
    LI = Builder.CreateLoad(VecPtr);
    LI->setAlignment(Alignment);
    E->VectorizedValue = LI;
    return propagateMetadata(LI, E->Scalars);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(VL0);
    unsigned Alignment = SI->getAlignment();
    unsigned AS = SI->getPointerAddressSpace();

    ValueList ValueOp;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand());

    setInsertPointAfterBundle(E->Scalars);

    Value *VecValue = vectorizeTree(ValueOp);
    Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
    S->setAlignment(Alignment);
    E->VectorizedValue = S;
    return propagateMetadata(S, E->Scalars);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);

    setInsertPointAfterBundle(E->Scalars);
    std::vector<Value *> OpVecs;
    for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
      ValueList OpVL;
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        CallInst *CEI = cast<CallInst>(E->Scalars[i]);
        OpVL.push_back(CEI->getArgOperand(j));
      }

      Value *OpVec = vectorizeTree(OpVL);
      DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
      OpVecs.push_back(OpVec);
    }

    Module *M = F->getParent();
    IntrinsicInst *II = cast<IntrinsicInst>(CI);
    Intrinsic::ID ID = II->getIntrinsicID();
    Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
    Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
    Value *V = Builder.CreateCall(CF, OpVecs);
    E->VectorizedValue = V;
    return V;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return 0;
}

Value *BoUpSLP::vectorizeTree() {
  Builder.SetInsertPoint(F->getEntryBlock().begin());
  vectorizeTree(&VectorizableTree[0]);

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");
values .\n"); 1678 1679 // Extract all of the elements with the external uses. 1680 for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end(); 1681 it != e; ++it) { 1682 Value *Scalar = it->Scalar; 1683 llvm::User *User = it->User; 1684 1685 // Skip users that we already RAUW. This happens when one instruction 1686 // has multiple uses of the same value. 1687 if (std::find(Scalar->user_begin(), Scalar->user_end(), User) == 1688 Scalar->user_end()) 1689 continue; 1690 assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar"); 1691 1692 int Idx = ScalarToTreeEntry[Scalar]; 1693 TreeEntry *E = &VectorizableTree[Idx]; 1694 assert(!E->NeedToGather && "Extracting from a gather list"); 1695 1696 Value *Vec = E->VectorizedValue; 1697 assert(Vec && "Can't find vectorizable value"); 1698 1699 Value *Lane = Builder.getInt32(it->Lane); 1700 // Generate extracts for out-of-tree users. 1701 // Find the insertion point for the extractelement lane. 1702 if (isa<Instruction>(Vec)){ 1703 if (PHINode *PH = dyn_cast<PHINode>(User)) { 1704 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 1705 if (PH->getIncomingValue(i) == Scalar) { 1706 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 1707 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 1708 CSEBlocks.insert(PH->getIncomingBlock(i)); 1709 PH->setOperand(i, Ex); 1710 } 1711 } 1712 } else { 1713 Builder.SetInsertPoint(cast<Instruction>(User)); 1714 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 1715 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 1716 User->replaceUsesOfWith(Scalar, Ex); 1717 } 1718 } else { 1719 Builder.SetInsertPoint(F->getEntryBlock().begin()); 1720 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 1721 CSEBlocks.insert(&F->getEntryBlock()); 1722 User->replaceUsesOfWith(Scalar, Ex); 1723 } 1724 1725 DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 1726 } 1727 1728 // For each vectorized value: 1729 for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) { 1730 TreeEntry *Entry = &VectorizableTree[EIdx]; 1731 1732 // For each lane: 1733 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 1734 Value *Scalar = Entry->Scalars[Lane]; 1735 1736 // No need to handle users of gathered values. 1737 if (Entry->NeedToGather) 1738 continue; 1739 1740 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 1741 1742 Type *Ty = Scalar->getType(); 1743 if (!Ty->isVoidTy()) { 1744#ifndef NDEBUG 1745 for (User *U : Scalar->users()) { 1746 DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 1747 1748 assert((ScalarToTreeEntry.count(U) || 1749 // It is legal to replace the reduction users by undef. 1750 (RdxOps && RdxOps->count(U))) && 1751 "Replacing out-of-tree value with undef"); 1752 } 1753#endif 1754 Value *Undef = UndefValue::get(Ty); 1755 Scalar->replaceAllUsesWith(Undef); 1756 } 1757 DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 1758 cast<Instruction>(Scalar)->eraseFromParent(); 1759 } 1760 } 1761 1762 for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) { 1763 BlocksNumbers[it].forget(); 1764 } 1765 Builder.ClearInsertionPoint(); 1766 1767 return VectorizableTree[0].VectorizedValue; 1768} 1769 1770void BoUpSLP::optimizeGatherSequence() { 1771 DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 1772 << " gather sequences instructions.\n"); 1773 // LICM InsertElementInst sequences. 
  for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
       e = GatherSeq.end(); it != e; ++it) {
    InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);

    if (!Insert)
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(Insert->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined in this basic block then we can't
    // hoist this instruction.
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  SmallVector<BasicBlock *, 8> CSEWorkList(CSEBlocks.begin(), CSEBlocks.end());
  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
                   [this](const BasicBlock *A, const BasicBlock *B) {
    return DT->properlyDominates(A, B);
  });

  // Perform O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (SmallVectorImpl<BasicBlock *>::iterator I = CSEWorkList.begin(),
       E = CSEWorkList.end(); I != E; ++I) {
    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = *I;
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = it++;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(),
           ve = Visited.end(); v != ve; ++v) {
        if (In->isIdenticalTo(*v) &&
            DT->dominates((*v)->getParent(), In->getParent())) {
          In->replaceAllUsesWith(*v);
          In->eraseFromParent();
          In = 0;
          break;
        }
      }
      if (In) {
        assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end());
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}

/// The SLPVectorizer Pass.
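///
/// A minimal sketch of the end-to-end effect on hypothetical IR: two
/// adjacent scalar stores such as
///   store double %a, double* %p
///   store double %b, double* %p1   ; %p1 = getelementptr double* %p, i64 1
/// are rewritten, when the cost model finds it profitable, into a single
/// wide store through a bitcast pointer:
///   %0 = bitcast double* %p to <2 x double>*
///   store <2 x double> %vec, <2 x double>* %0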
struct SLPVectorizer : public FunctionPass {
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  const DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;

  bool runOnFunction(Function &F) override {
    if (skipOptnoneFunction(F))
      return false;

    SE = &getAnalysis<ScalarEvolution>();
    DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
    DL = DLP ? &DLP->getDataLayout() : 0;
    TTI = &getAnalysis<TargetTransformInfo>();
    AA = &getAnalysis<AliasAnalysis>();
    LI = &getAnalysis<LoopInfo>();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

    StoreRefs.clear();
    bool Changed = false;

    // If the target claims to have no vector registers don't attempt
    // vectorization.
    if (!TTI->getNumberOfRegisters(true))
      return false;

    // Must have DataLayout. We can't require it because some tests run
    // without a triple.
    if (!DL)
      return false;

    // Don't vectorize when the attribute NoImplicitFloat is used.
    if (F.hasFnAttribute(Attribute::NoImplicitFloat))
      return false;

    DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

    // Use the bottom up slp vectorizer to construct chains that start with
    // the store instructions.
    BoUpSLP R(&F, SE, DL, TTI, AA, LI, DT);

    // Scan the blocks in the function in post order.
    for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()),
         e = po_end(&F.getEntryBlock()); it != e; ++it) {
      BasicBlock *BB = *it;

      // Vectorize trees that end at stores.
      if (unsigned count = collectStores(BB, R)) {
        (void)count;
        DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
        Changed |= vectorizeStoreChains(R);
      }

      // Vectorize trees that end at reductions.
      Changed |= vectorizeChainsInBlock(BB, R);
    }

    if (Changed) {
      R.optimizeGatherSequence();
      DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
      DEBUG(verifyFunction(F));
    }
    return Changed;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<TargetTransformInfo>();
    AU.addRequired<LoopInfo>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.setPreservesCFG();
  }

private:

  /// \brief Collect memory references and sort them according to their base
  /// object. We sort the stores by their base objects to reduce the cost of
  /// the quadratic search on the stores. TODO: We can further reduce this
  /// cost if we flush the chain creation every time we run into a memory
  /// barrier.
  unsigned collectStores(BasicBlock *BB, BoUpSLP &R);

  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
  bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);

  /// \brief Try to vectorize a list of operands.
  /// \returns true if a value was vectorized.
  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R);

  /// \brief Try to vectorize a chain that may start at the operands of \p V.
  bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);

  /// \brief Vectorize the stores that were collected in StoreRefs.
  bool vectorizeStoreChains(BoUpSLP &R);

  /// \brief Scan the basic block and look for patterns that are likely to
  /// start a vectorization chain.
  bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);

  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
                           BoUpSLP &R);

  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
                       BoUpSLP &R);
private:
  StoreListMap StoreRefs;
};

/// \brief Check that the values in the slice of the VL array still exist in
/// the WeakVH array.
/// Vectorization of part of the VL array may cause later values in the VL
/// array to become invalid. We track when this has happened in the WeakVH
/// array.
static bool hasValueBeenRAUWed(ArrayRef<Value *> &VL,
                               SmallVectorImpl<WeakVH> &VH,
                               unsigned SliceBegin,
                               unsigned SliceSize) {
  for (unsigned i = SliceBegin; i < SliceBegin + SliceSize; ++i)
    if (VH[i] != VL[i])
      return true;

  return false;
}

bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                        int CostThreshold, BoUpSLP &R) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
        << "\n");
  Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
  unsigned Sz = DL->getTypeSizeInBits(StoreTy);
  unsigned VF = MinVecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
          << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < CostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                    int costThreshold, BoUpSLP &R) {
  SetVector<Value *> Heads, Tails;
  SmallDenseMap<Value *, Value *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store
  // twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
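  // For example (hypothetical stores to p[0..3]): the pairs (p[0],p[1]),
  // (p[1],p[2]) and (p[2],p[3]) are recorded below, so following
  // ConsecutiveChain from the head p[0] later recovers the full chain
  // p[0], p[1], p[2], p[3].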
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    for (unsigned j = 0; j < e; ++j) {
      if (i == j)
        continue;

      if (R.isConsecutiveAccess(Stores[i], Stores[j])) {
        Tails.insert(Stores[j]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[j];
      }
    }
  }

  // For stores that start but don't end a link in the chain:
  for (SetVector<Value *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and
    // try to vectorize it.
    BoUpSLP::ValueList Operands;
    Value *I = *it;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (VectorizedStores.count(I))
        break;
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R);

    // Mark the vectorized stores so that we don't vectorize them again.
    if (Vectorized)
      VectorizedStores.insert(Operands.begin(), Operands.end());
    Changed |= Vectorized;
  }

  return Changed;
}

unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
  unsigned count = 0;
  StoreRefs.clear();
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    StoreInst *SI = dyn_cast<StoreInst>(it);
    if (!SI)
      continue;

    // Don't touch volatile stores.
    if (!SI->isSimple())
      continue;

    // Check that the pointer points to scalars.
    Type *Ty = SI->getValueOperand()->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return 0;

    // Find the base pointer.
    Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);

    // Save the store locations.
    StoreRefs[Ptr].push_back(SI);
    count++;
  }
  return count;
}

bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R);
}

bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size()
        << ".\n");

  // Check that all of the parts are scalar instructions of the same type.
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();

  Type *Ty0 = I0->getType();
  unsigned Sz = DL->getTypeSizeInBits(Ty0);
  unsigned VF = MinVecRegSize / Sz;

  for (int i = 0, e = VL.size(); i < e; ++i) {
    Type *Ty = VL[i]->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return false;
    Instruction *Inst = dyn_cast<Instruction>(VL[i]);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());

  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    unsigned OpsWidth = 0;

    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;

    if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
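    // (A WeakVH nulls out when its value is deleted and follows RAUW to the
    // replacement, so any mismatch with the original VL slice means the lane
    // was already consumed by an earlier vectorization.)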
    if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations\n");
    ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);

    R.buildTree(Ops);
    int Cost = R.getTreeCost();

    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
  if (!V)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
    return true;

  BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
  BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
  // Try to skip B.
  if (B && B->hasOneUse()) {
    BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (tryToVectorizePair(A, B0, R)) {
      B->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A, B1, R)) {
      B->moveBefore(V);
      return true;
    }
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (tryToVectorizePair(A0, B, R)) {
      A->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A1, B, R)) {
      A->moveBefore(V);
      return true;
    }
  }
  return false;
}

/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add
/// and fadd) that has operations that can be put into a vector as its leaf.
/// For example, this tree:
///
/// mul mul mul mul
///  \  /    \  /
///   +       +
///    \     /
///       +
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary
/// operation feeding a phi.
///    ...
///    \  /
///     +
///     |
///  phi +=
///
///  Or:
///    ...
///    \  /
///     +
///     |
///   *p =
///
class HorizontalReduction {
  SmallPtrSet<Value *, 16> ReductionOps;
  SmallVector<Value *, 32> ReducedVals;

  BinaryOperator *ReductionRoot;
  PHINode *ReductionPHI;

  /// The opcode of the reduction.
  unsigned ReductionOpcode;
  /// The opcode of the values we perform a reduction on.
  unsigned ReducedValueOpcode;
  /// The width of one full horizontal reduction operation.
  unsigned ReduxWidth;
  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
  bool IsPairwiseReduction;

public:
  HorizontalReduction()
      : ReductionRoot(0), ReductionPHI(0), ReductionOpcode(0),
        ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B,
                                 const DataLayout *DL) {
    assert((!Phi ||
            std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
           "This phi needs to use the binary operator");

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (B->getOperand(0) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(1));
      } else if (B->getOperand(1) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(0));
      }
    }

    if (!B)
      return false;

    Type *Ty = B->getType();
    if (Ty->isVectorTy())
      return false;

    ReductionOpcode = B->getOpcode();
    ReducedValueOpcode = 0;
    ReduxWidth = MinVecRegSize / DL->getTypeSizeInBits(Ty);
    ReductionRoot = B;
    ReductionPHI = Phi;

    if (ReduxWidth < 4)
      return false;

    // We currently only support adds.
    if (ReductionOpcode != Instruction::Add &&
        ReductionOpcode != Instruction::FAdd)
      return false;

    // Post order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, 0));
    while (!Stack.empty()) {
      BinaryOperator *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;

      // Only handle trees in the current basic block.
      if (TreeN->getParent() != B->getParent())
        return false;

      // Each tree node needs to have one user except for the ultimate
      // reduction.
      if (!TreeN->hasOneUse() && TreeN != B)
        return false;

      // Postorder visit.
      if (EdgeToVisit == 2 || IsReducedValue) {
        if (IsReducedValue) {
          // Make sure that the opcodes of the operations that we are going
          // to reduce match.
          if (!ReducedValueOpcode)
            ReducedValueOpcode = TreeN->getOpcode();
          else if (ReducedValueOpcode != TreeN->getOpcode())
            return false;
          ReducedVals.push_back(TreeN);
        } else {
          // We need to be able to reassociate the adds.
          if (!TreeN->isAssociative())
            return false;
          ReductionOps.insert(TreeN);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
      BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV);
      if (Next)
        Stack.push_back(std::make_pair(Next, 0));
      else if (NextV != Phi)
        return false;
    }
    return true;
  }

  /// \brief Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < ReduxWidth)
      return false;

    Value *VectorizedTree = 0;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setUnsafeAlgebra();
    Builder.SetFastMathFlags(Unsafe);
    unsigned i = 0;

    for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
      ArrayRef<Value *> ValsToReduce(&ReducedVals[i], ReduxWidth);
      V.buildTree(ValsToReduce, &ReductionOps);

      // Estimate cost.
      int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
      if (Cost >= -SLPCostThreshold)
        break;

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
            << ". (HorRdx)\n");

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree();

      // Emit a reduction.
      Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedSubTree, "bin.rdx");
      } else
        VectorizedTree = ReducedSubTree;
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        Builder.SetCurrentDebugLocation(
            cast<Instruction>(ReducedVals[i])->getDebugLoc());
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedVals[i]);
      }
      // Update users.
      if (ReductionPHI) {
        assert(ReductionRoot != NULL && "Need a reduction operation");
        ReductionRoot->setOperand(0, VectorizedTree);
        ReductionRoot->setOperand(1, ReductionPHI);
      } else
        ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != 0;
  }

private:

  /// \brief Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
    int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy,
                                                 false);

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost
                                           : SplittingRdxCost;

    int ScalarReduxCost =
        ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
          << " for reduction that starts with " << *FirstReducedVal
          << " (It is a "
          << (IsPairwiseReduction ? "pairwise" : "splitting")
          << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }
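
  // For example (a sketch for ReduxWidth == 4 and a splitting 'fadd'
  // reduction): emitReduction below produces IR of the shape
  //   %rdx.shuf  = shufflevector <4 x float> %v, <4 x float> undef,
  //                <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  //   %bin.rdx   = fadd <4 x float> %v, %rdx.shuf
  //   %rdx.shuf1 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
  //                <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  //   %bin.rdx2  = fadd <4 x float> %bin.rdx, %rdx.shuf1
  //   %r         = extractelement <4 x float> %bin.rdx2, i32 0
  // where %v is the hypothetical vectorized bundle of reduced values.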
"pairwise" : "splitting") 2487 << " reduction)\n"); 2488 2489 return VecReduxCost - ScalarReduxCost; 2490 } 2491 2492 static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L, 2493 Value *R, const Twine &Name = "") { 2494 if (Opcode == Instruction::FAdd) 2495 return Builder.CreateFAdd(L, R, Name); 2496 return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name); 2497 } 2498 2499 /// \brief Emit a horizontal reduction of the vectorized value. 2500 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) { 2501 assert(VectorizedValue && "Need to have a vectorized tree node"); 2502 Instruction *ValToReduce = dyn_cast<Instruction>(VectorizedValue); 2503 assert(isPowerOf2_32(ReduxWidth) && 2504 "We only handle power-of-two reductions for now"); 2505 2506 Value *TmpVec = ValToReduce; 2507 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 2508 if (IsPairwiseReduction) { 2509 Value *LeftMask = 2510 createRdxShuffleMask(ReduxWidth, i, true, true, Builder); 2511 Value *RightMask = 2512 createRdxShuffleMask(ReduxWidth, i, true, false, Builder); 2513 2514 Value *LeftShuf = Builder.CreateShuffleVector( 2515 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 2516 Value *RightShuf = Builder.CreateShuffleVector( 2517 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), 2518 "rdx.shuf.r"); 2519 TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf, 2520 "bin.rdx"); 2521 } else { 2522 Value *UpperHalf = 2523 createRdxShuffleMask(ReduxWidth, i, false, false, Builder); 2524 Value *Shuf = Builder.CreateShuffleVector( 2525 TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf"); 2526 TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx"); 2527 } 2528 } 2529 2530 // The result is in the first element of the vector. 2531 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 2532 } 2533}; 2534 2535/// \brief Recognize construction of vectors like 2536/// %ra = insertelement <4 x float> undef, float %s0, i32 0 2537/// %rb = insertelement <4 x float> %ra, float %s1, i32 1 2538/// %rc = insertelement <4 x float> %rb, float %s2, i32 2 2539/// %rd = insertelement <4 x float> %rc, float %s3, i32 3 2540/// 2541/// Returns true if it matches 2542/// 2543static bool findBuildVector(InsertElementInst *IE, 2544 SmallVectorImpl<Value *> &Ops) { 2545 if (!isa<UndefValue>(IE->getOperand(0))) 2546 return false; 2547 2548 while (true) { 2549 Ops.push_back(IE->getOperand(1)); 2550 2551 if (IE->use_empty()) 2552 return false; 2553 2554 InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back()); 2555 if (!NextUse) 2556 return true; 2557 2558 // If this isn't the final use, make sure the next insertelement is the only 2559 // use. It's OK if the final constructed vector is used multiple times 2560 if (!IE->hasOneUse()) 2561 return false; 2562 2563 IE = NextUse; 2564 } 2565 2566 return false; 2567} 2568 2569static bool PhiTypeSorterFunc(Value *V, Value *V2) { 2570 return V->getType() < V2->getType(); 2571} 2572 2573bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 2574 bool Changed = false; 2575 SmallVector<Value *, 4> Incoming; 2576 SmallSet<Value *, 16> VisitedInstrs; 2577 2578 bool HaveVectorizedPhiNodes = true; 2579 while (HaveVectorizedPhiNodes) { 2580 HaveVectorizedPhiNodes = false; 2581 2582 // Collect the incoming values from the PHIs. 
    Incoming.clear();
    for (BasicBlock::iterator instr = BB->begin(), ie = BB->end();
         instr != ie; ++instr) {
      PHINode *P = dyn_cast<PHINode>(instr);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
         E = Incoming.end(); IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs ("
            << NumElts << ")\n");
      if (NumElts > 1 &&
          tryToVectorizeList(ArrayRef<Value *>(IncIt, NumElts), R)) {
        // Success, start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
    // We may go through BB multiple times so skip the ones we have already
    // checked.
    if (!VisitedInstrs.insert(it))
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;
      Value *Rdx =
          (P->getIncomingBlock(0) == BB
               ? (P->getIncomingValue(0))
               : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1) : 0));
      // Check if this is a Binary Operator.
      BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
      if (!BI)
        continue;

      // Try to match and vectorize a horizontal reduction.
      HorizontalReduction HorRdx;
      if (ShouldVectorizeHor &&
          HorRdx.matchAssociativeReduction(P, BI, DL) &&
          HorRdx.tryToReduce(R, TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      Value *Inst = BI->getOperand(0);
      if (Inst == P)
        Inst = BI->getOperand(1);

      if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      continue;
    }

    // Try to vectorize horizontal reductions feeding into a store.
    if (ShouldStartVectorizeHorAtStore)
      if (StoreInst *SI = dyn_cast<StoreInst>(it))
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(SI->getValueOperand())) {
          HorizontalReduction HorRdx;
          if (((HorRdx.matchAssociativeReduction(0, BinOp, DL) &&
                HorRdx.tryToReduce(R, TTI)) ||
               tryToVectorize(BinOp, R))) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize trees that start at compare instructions.
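    // For example (hypothetical IR): for
    //   %c = fcmp olt float %add0, %add1
    // we first try to vectorize the pair (%add0, %add1), and then the
    // operand pairs of %add0 and %add1 themselves.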
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int i = 0; i < 2; ++i) {
        if (BinaryOperator *BI =
                dyn_cast<BinaryOperator>(CI->getOperand(i))) {
          if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
            Changed = true;
            // We would like to start over since some instructions are
            // deleted and the iterator may become invalid.
            it = BB->begin();
            e = BB->end();
          }
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
    if (InsertElementInst *IE = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 8> Ops;
      if (!findBuildVector(IE, Ops))
        continue;

      if (tryToVectorizeList(Ops, R)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
       it != e; ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
          << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      ArrayRef<StoreInst *> Chunk(&it->second[CI], Len);
      Changed |= vectorizeStores(Chunk, -SLPCostThreshold, R);
    }
  }
  return Changed;
}

} // end anonymous namespace

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
}
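
// A minimal sketch of scheduling this pass from client code, assuming the
// legacy pass manager interface of this LLVM revision ('M' is a hypothetical
// llvm::Module that has already been created):
//
//   PassManager PM;
//   PM.add(createSLPVectorizerPass());
//   PM.run(M);
//
// In normal builds the pass is instead added by PassManagerBuilder when SLP
// vectorization is enabled, so most clients never construct it directly.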