SLPVectorizer.cpp revision 7f6926930f48234484167e9ecce90f627a030702
//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <map>

using namespace llvm;

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

namespace {

static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;

/// A helper class for numbering instructions in multiple blocks.
/// Numbers start at zero for each basic block.
struct BlockNumbering {

  BlockNumbering(BasicBlock *Bb) : BB(Bb), Valid(false) {}

  BlockNumbering() : BB(0), Valid(false) {}

  void numberInstructions() {
    unsigned Loc = 0;
    InstrIdx.clear();
    InstrVec.clear();
    // Number the instructions in the block.
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      InstrIdx[it] = Loc++;
      InstrVec.push_back(it);
      assert(InstrVec[InstrIdx[it]] == it && "Invalid allocation");
    }
    Valid = true;
  }

  int getIndex(Instruction *I) {
    assert(I->getParent() == BB && "Invalid instruction");
    if (!Valid)
      numberInstructions();
    assert(InstrIdx.count(I) && "Unknown instruction");
    return InstrIdx[I];
  }

  Instruction *getInstruction(unsigned loc) {
    if (!Valid)
      numberInstructions();
    assert(InstrVec.size() > loc && "Invalid Index");
    return InstrVec[loc];
  }

  void forget() { Valid = false; }

private:
  /// The block we are numbering.
  BasicBlock *BB;
  /// Is the block numbered.
  bool Valid;
  /// Maps instructions to numbers and back.
  SmallDenseMap<Instruction *, int> InstrIdx;
  /// Maps integers to Instructions.
  SmallVector<Instruction *, 32> InstrVec;
};

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return 0;

    if (BB != I->getParent())
      return 0;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode())
      return 0;
  }
  return Opcode;
}

/// \returns \p I after propagating metadata from \p VL.
static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
    unsigned Kind = Metadata[i].first;
    MDNode *MD = Metadata[i].second;

    for (int i = 1, e = VL.size(); MD && i != e; i++) {
      Instruction *I = cast<Instruction>(VL[i]);
      MDNode *IMD = I->getMetadata(Kind);

      switch (Kind) {
      default:
        MD = 0; // Remove unknown metadata
        break;
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      }
    }
    I->setMetadata(Kind, MD);
  }
  return I;
}

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type* getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return 0;

  return Ty;
}
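// As an example of what the next helper accepts (value names illustrative):
//   %e0 = extractelement <4 x float> %vec, i32 0
//   %e1 = extractelement <4 x float> %vec, i32 1
//   %e2 = extractelement <4 x float> %vec, i32 2
//   %e3 = extractelement <4 x float> %vec, i32 3
// The bundle [%e0, %e1, %e2, %e3] extracts every lane of %vec in order, so
// no gathering is needed and %vec itself can serve as the vectorized value.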
/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}

static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                           SmallVectorImpl<Value *> &Left,
                                           SmallVectorImpl<Value *> &Right) {

  SmallVector<Value *, 16> OrigLeft, OrigRight;

  bool AllSameOpcodeLeft = true;
  bool AllSameOpcodeRight = true;
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);

    OrigLeft.push_back(V0);
    OrigRight.push_back(V1);

    Instruction *I0 = dyn_cast<Instruction>(V0);
    Instruction *I1 = dyn_cast<Instruction>(V1);

    // Check whether all operands on one side have the same opcode. In this
    // case we want to preserve the original order and not make things worse
    // by reordering.
    AllSameOpcodeLeft = I0;
    AllSameOpcodeRight = I1;

    if (i && AllSameOpcodeLeft) {
      if (Instruction *P0 = dyn_cast<Instruction>(OrigLeft[i - 1])) {
        if (P0->getOpcode() != I0->getOpcode())
          AllSameOpcodeLeft = false;
      } else
        AllSameOpcodeLeft = false;
    }
    if (i && AllSameOpcodeRight) {
      if (Instruction *P1 = dyn_cast<Instruction>(OrigRight[i - 1])) {
        if (P1->getOpcode() != I1->getOpcode())
          AllSameOpcodeRight = false;
      } else
        AllSameOpcodeRight = false;
    }

    // Sort two opcodes. In the code below we try to preserve the ability to
    // use a broadcast of values instead of individual inserts.
    // vl1 = load
    // vl2 = phi
    // vr1 = load
    // vr2 = vr1
    //     = vl1 x vr1
    //     = vl2 x vr2
    // If we just sorted according to opcode we would leave the first line
    // intact but we would swap vl2 with vr2 because opcode(phi) > opcode(load).
    //     = vl1 x vr1
    //     = vr2 x vl2
    // Because vr2 and vr1 are from the same load we lose the opportunity of a
    // broadcast for the packed right side in the backend: we have [vr1, vl2]
    // instead of [vr1, vr2=vr1].
    if (I0 && I1) {
      if (!i && I0->getOpcode() > I1->getOpcode()) {
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() > I1->getOpcode() && Right[i - 1] != I1) {
        // Try not to destroy a broadcast for no apparent benefit.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Right[i - 1] == I0) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Left[i - 1] == I1) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else {
        Left.push_back(I0);
        Right.push_back(I1);
      }
      continue;
    }
    // One opcode, put the instruction on the right.
    if (I0) {
      Left.push_back(V1);
      Right.push_back(I0);
      continue;
    }
    Left.push_back(V0);
    Right.push_back(V1);
  }

  bool LeftBroadcast = isSplat(Left);
  bool RightBroadcast = isSplat(Right);

  // Don't reorder if the operands were good to begin with.
  if (!(LeftBroadcast || RightBroadcast) &&
      (AllSameOpcodeRight || AllSameOpcodeLeft)) {
    Left = OrigLeft;
    Right = OrigRight;
  }
}

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, DataLayout *Dl,
          TargetTransformInfo *Tti, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt) :
    F(Func), SE(Se), DL(Dl), TTI(Tti), AA(Aa), LI(Li), DT(Dt),
    Builder(Se->getContext()) {
    // Setup the block numbering utility for all of the blocks in the
    // function.
    for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
      BasicBlock *BB = it;
      BlocksNumbers[BB] = BlockNumbering(BB);
    }
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots and is possibly
  /// used by a reduction of \p RdxOps.
  void buildTree(ArrayRef<Value *> Roots, ValueSet *RdxOps = 0);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    RdxOps = 0;
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    MemBarrierIgnoreList.clear();
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();
private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \returns the AA location that is being accessed by the instruction.
  AliasAnalysis::Location getLocation(Instruction *I);

  /// \brief Checks if it is possible to sink an instruction from
  /// \p Src to \p Dst.
  /// \returns the pointer to the barrier instruction if we can't sink.
  Value *getSinkBarrier(Instruction *Src, Instruction *Dst);

  /// \returns the index of the last instruction in the BB from \p VL.
  int getLastIndex(ArrayRef<Value *> VL);

  /// \returns the last Instruction in the bundle \p VL.
  Instruction *getLastInstruction(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(0), LastScalarIndex(0),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// The index in the basic block of the last scalar.
    int LastScalarIndex;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.push_back(TreeEntry());
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      Last->LastScalarIndex = getLastIndex(VL);
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      Last->LastScalarIndex = 0;
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser (Value *S, llvm::User *U, int L) :
      Scalar(S), User(U), Lane(L){};
    // Which scalar in our function.
    Value *Scalar;
    // Which user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// A list of instructions to ignore while sinking
  /// memory instructions. This map must be reset between runs of getCost.
  ValueSet MemBarrierIgnoreList;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SmallSet<BasicBlock *, 8> CSEBlocks;

  /// Numbers instructions in different blocks.
  DenseMap<BasicBlock *, BlockNumbering> BlocksNumbers;

  /// Reduction operators.
  ValueSet *RdxOps;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ValueSet *Rdx) {
  deleteTree();
  RdxOps = Rdx;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (Value::use_iterator User = Scalar->use_begin(),
           UE = Scalar->use_end(); User != UE; ++User) {
        DEBUG(dbgs() << "SLP: Checking user:" << **User << ".\n");

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(*User)) {
          DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
                **User << ".\n");
          int Idx = ScalarToTreeEntry[*User]; (void) Idx;
          assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
          continue;
        }
        Instruction *UserInst = dyn_cast<Instruction>(*User);
        if (!UserInst)
          continue;

        // Ignore uses that are part of the reduction.
        if (Rdx && std::find(Rdx->begin(), Rdx->end(), UserInst) != Rdx->end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << **User << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, *User, Lane));
      }
    }
  }
}


void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }
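  // A note on "gathering", the term used throughout this function: when a
  // bundle cannot be vectorized as a unit, a tree entry is still recorded
  // but with NeedToGather set. Code generation later materializes such an
  // entry with a chain of insertelement instructions (see Gather()), and
  // the cost model charges for those inserts via getGatherCost().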
  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }

  // If all of the operands are identical or constant we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) ||
      !getSameOpcode(VL)) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars appears in the table OR it is marked as a value
  // that needs to stay scalar then we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i]) || MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar. \n");
      newTreeEntry(VL, false);
      return;
    }
  }
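  // Scheduling sketch for the checks below: instructions in each block are
  // numbered in program order (see BlockNumbering), and the bundle's
  // position is the index of its last scalar. The bundle is rejected (and
  // gathered instead) if a same-block user of one of its scalars occurs
  // before that position, because the extractelement that would feed such
  // a user could not be scheduled.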
  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  int MyLastIndex = getLastIndex(VL);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *Scalar = cast<Instruction>(VL[i]);
    DEBUG(dbgs() << "SLP: Checking users of " << *Scalar << ". \n");
    for (Value::use_iterator U = Scalar->use_begin(), UE = Scalar->use_end();
         U != UE; ++U) {
      DEBUG(dbgs() << "SLP: \tUser " << **U << ". \n");
      Instruction *User = dyn_cast<Instruction>(*U);
      if (!User) {
        DEBUG(dbgs() << "SLP: Gathering due to unknown user. \n");
        newTreeEntry(VL, false);
        return;
      }

      // We don't care if the user is in a different basic block.
      BasicBlock *UserBlock = User->getParent();
      if (UserBlock != BB) {
        DEBUG(dbgs() << "SLP: User from a different basic block "
              << *User << ". \n");
        continue;
      }

      // If this is a PHINode within this basic block then we can place the
      // extract wherever we want.
      if (isa<PHINode>(*User)) {
        DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *User << ". \n");
        continue;
      }

      // Check if this is a safe in-tree user.
      if (ScalarToTreeEntry.count(User)) {
        int Idx = ScalarToTreeEntry[User];
        int VecLocation = VectorizableTree[Idx].LastScalarIndex;
        if (VecLocation <= MyLastIndex) {
          DEBUG(dbgs() << "SLP: Gathering due to unschedulable vector. \n");
          newTreeEntry(VL, false);
          return;
        }
        DEBUG(dbgs() << "SLP: In-tree user (" << *User << ") at #" <<
              VecLocation << " vector value (" << *Scalar << ") at #"
              << MyLastIndex << ".\n");
        continue;
      }

      // This user is part of the reduction.
      if (RdxOps && RdxOps->count(User))
        continue;

      // Make sure that we can schedule this unknown user.
      BlockNumbering &BN = BlocksNumbers[BB];
      int UserIndex = BN.getIndex(User);
      if (UserIndex < MyLastIndex) {

        DEBUG(dbgs() << "SLP: Can't schedule extractelement for "
              << *User << ". \n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i + 1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false);
        return;
      }

  // Check that instructions in this bundle don't reference other instructions.
  // The runtime of this check is O(N * N-1 * uses(N)) and a typical N is 4.
  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    for (Value::use_iterator U = VL[i]->use_begin(), UE = VL[i]->use_end();
         U != UE; ++U) {
      for (unsigned j = 0; j < e; ++j) {
        if (i != j && *U == VL[j]) {
          DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << **U << ". \n");
          newTreeEntry(VL, false);
          return;
        }
      }
    }
  }

  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  unsigned Opcode = getSameOpcode(VL);

  // Check if it is safe to sink the loads or the stores.
  if (Opcode == Instruction::Load || Opcode == Instruction::Store) {
    Instruction *Last = getLastInstruction(VL);

    for (unsigned i = 0, e = VL.size(); i < e; ++i) {
      if (VL[i] == Last)
        continue;
      Value *Barrier = getSinkBarrier(cast<Instruction>(VL[i]), Last);
      if (Barrier) {
        DEBUG(dbgs() << "SLP: Can't sink " << *VL[i] << "\n down to " << *Last
              << "\n because of " << *Barrier << ". Gathering.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = cast<PHINode>(VL0);

    // Check for terminator values (e.g. invoke).
    for (unsigned j = 0; j < VL.size(); ++j)
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        TerminatorInst *Term =
            dyn_cast<TerminatorInst>(cast<PHINode>(VL[j])->getIncomingValue(i));
        if (Term) {
          DEBUG(dbgs()
                << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
          newTreeEntry(VL, false);
          return;
        }
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<PHINode>(VL[j])->getIncomingValue(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ExtractElement: {
    bool Reuse = CanReuseExtract(VL);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    }
    newTreeEntry(VL, Reuse);
    return;
  }
  case Instruction::Load: {
    // Check if the loads are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple() || !isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of loads.\n");
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    // We can ignore these values because we are sinking them down.
    MemBarrierIgnoreList.insert(VL.begin(), VL.end());
    buildTree_rec(Operands, Depth + 1);
    return;
  }
  default:
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}
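// A note on the cost convention used below: every entry contributes
// (cost of the vector form) - (total cost of the scalar forms), so a
// negative value means the vector code is expected to be cheaper. For
// example, if four scalar i32 adds cost 1 each and one <4 x i32> add also
// costs 1, the entry's cost is 1 - 4 = -3.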
int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }

  assert(getSameOpcode(VL) && getSameType(VL) && getSameBlock(VL) &&
         "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  unsigned Opcode = VL0->getOpcode();
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractElement: {
    if (CanReuseExtract(VL))
      return 0;
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost = VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;

      // Check whether all second operands are constant.
      for (unsigned i = 0; i < VL.size(); ++i)
        if (!isa<ConstantInt>(cast<Instruction>(VL[i])->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }

      ScalarCost =
          VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK);
    }
    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    int ScalarStCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
        VectorizableTree.size() << " is fully vectorizable .\n");

  // We only handle trees of height 2.
  if (VectorizableTree.size() != 2)
    return false;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
        VectorizableTree.size() << ".\n");

  // We only vectorize tiny trees if it is fully vectorizable.
  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
    if (!VectorizableTree.size()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return INT_MAX;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
    int C = getEntryCost(&VectorizableTree[i]);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
          << *VectorizableTree[i].Scalars[0] << " .\n");
    Cost += C;
  }

  int ExtractCost = 0;
  for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
       I != E; ++I) {

    VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
    ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                           I->Lane);
  }


  DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
  return Cost + ExtractCost;
}

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}

AliasAnalysis::Location BoUpSLP::getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return AA->getLocation(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return AA->getLocation(LI);
  return AliasAnalysis::Location();
}

Value *BoUpSLP::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return 0;
}

unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}
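// A worked example of the check below (values illustrative): for two i32
// stores to &A[0] and &A[1], stripping in-bounds constant offsets yields
// the same base pointer with OffsetDelta == 4, which equals the store size
// of i32, so the accesses are consecutive. When the stripped bases still
// differ syntactically, SCEV is asked whether BaseA + (Size - OffsetDelta)
// is the same expression as BaseB.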
bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same type.
  if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL->getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL->getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
  return X == PtrSCEVB;
}

Value *BoUpSLP::getSinkBarrier(Instruction *Src, Instruction *Dst) {
  assert(Src->getParent() == Dst->getParent() && "Not the same BB");
  BasicBlock::iterator I = Src, E = Dst;
  /// Scan all of the instructions from Src to Dst and check if
  /// the source may alias.
  for (++I; I != E; ++I) {
    // Ignore store instructions that are marked as 'ignore'.
    if (MemBarrierIgnoreList.count(I))
      continue;
    if (Src->mayWriteToMemory()) /* Write */ {
      if (!I->mayReadOrWriteMemory())
        continue;
    } else /* Read */ {
      if (!I->mayWriteToMemory())
        continue;
    }
    AliasAnalysis::Location A = getLocation(&*I);
    AliasAnalysis::Location B = getLocation(Src);

    if (!A.Ptr || !B.Ptr || AA->alias(A, B))
      return I;
  }
  return 0;
}

int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(BB->getFirstNonPHI());
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  return MaxIdx;
}

Instruction *BoUpSLP::getLastInstruction(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(cast<Instruction>(VL[0]));
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  Instruction *I = BN.getInstruction(MaxIdx);
  assert(I && "bad location");
  return I;
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  Instruction *LastInst = getLastInstruction(VL);
  BasicBlock::iterator NextInst = LastInst;
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}
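// As an illustration of the gathering code below, a bundle of four i32
// scalars [%a, %b, %c, %d] is materialized roughly as (names illustrative):
//   %v0 = insertelement <4 x i32> undef, i32 %a, i32 0
//   %v1 = insertelement <4 x i32> %v0, i32 %b, i32 1
//   %v2 = insertelement <4 x i32> %v1, i32 %c, i32 2
//   %v3 = insertelement <4 x i32> %v2, i32 %d, i32 3
// Each insert is remembered in GatherSeq so that optimizeGatherSequence()
// can later hoist and CSE these chains.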
Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instruction.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
  SmallDenseMap<Value*, int>::const_iterator Entry
    = ScalarToTreeEntry.find(VL[0]);
  if (Entry != ScalarToTreeEntry.end()) {
    int Idx = Entry->second;
    const TreeEntry *En = &VectorizableTree[Idx];
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return 0;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    if (E->isSame(VL))
      return vectorizeTree(E);
  }

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    return Gather(E->Scalars, VecTy);
  }

  unsigned Opcode = VL0->getOpcode();
  assert(Opcode == getSameOpcode(E->Scalars) && "Invalid opcode");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallSet<BasicBlock*, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB)) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
      for (unsigned j = 0; j < E->Scalars.size(); ++j)
        Operands.push_back(cast<PHINode>(E->Scalars[j])->
                           getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (CanReuseExtract(E->Scalars)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    return Gather(E->Scalars, VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));

    setInsertPointAfterBundle(E->Scalars);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CastInst *CI = cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    ValueList LHSVL, RHSVL;
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
      reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
    else
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
        RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      }

    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (LHS == RHS && isa<Instruction>(LHS)) {
      assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
    }

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
    Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
    E->VectorizedValue = V;

    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
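  // A sketch of the memory cases below (types illustrative): four
  // consecutive i32 loads become a bitcast of the first load's pointer to
  // <4 x i32>* followed by one wide load; consecutive stores are handled
  // symmetrically, with the packed value built first. The original scalar
  // alignment is kept on the wide access.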
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    setInsertPointAfterBundle(E->Scalars);

    LoadInst *LI = cast<LoadInst>(VL0);
    unsigned AS = LI->getPointerAddressSpace();

    Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    unsigned Alignment = LI->getAlignment();
    LI = Builder.CreateLoad(VecPtr);
    LI->setAlignment(Alignment);
    E->VectorizedValue = LI;
    return propagateMetadata(LI, E->Scalars);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(VL0);
    unsigned Alignment = SI->getAlignment();
    unsigned AS = SI->getPointerAddressSpace();

    ValueList ValueOp;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand());

    setInsertPointAfterBundle(E->Scalars);

    Value *VecValue = vectorizeTree(ValueOp);
    Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
    S->setAlignment(Alignment);
    E->VectorizedValue = S;
    return propagateMetadata(S, E->Scalars);
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return 0;
}

Value *BoUpSLP::vectorizeTree() {
  Builder.SetInsertPoint(F->getEntryBlock().begin());
  vectorizeTree(&VectorizableTree[0]);

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n");

  // Extract all of the elements with the external uses.
  for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
       it != e; ++it) {
    Value *Scalar = it->Scalar;
    llvm::User *User = it->User;

    // Skip users that we already RAUW. This happens when one instruction
    // has multiple uses of the same value.
    if (std::find(Scalar->use_begin(), Scalar->use_end(), User) ==
        Scalar->use_end())
      continue;
    assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");

    int Idx = ScalarToTreeEntry[Scalar];
    TreeEntry *E = &VectorizableTree[Idx];
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(it->Lane);
    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
    if (PHINode *PN = dyn_cast<PHINode>(Vec)) {
      Builder.SetInsertPoint(PN->getParent()->getFirstInsertionPt());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      CSEBlocks.insert(PN->getParent());
      User->replaceUsesOfWith(Scalar, Ex);
    } else if (isa<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            CSEBlocks.insert(PH->getIncomingBlock(i));
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        CSEBlocks.insert(cast<Instruction>(User)->getParent());
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(F->getEntryBlock().begin());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      CSEBlocks.insert(&F->getEntryBlock());
      User->replaceUsesOfWith(Scalar, Ex);
    }

    DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      assert(Entry->VectorizedValue && "Can't find vectorizable value");

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
        for (Value::use_iterator User = Scalar->use_begin(),
             UE = Scalar->use_end(); User != UE; ++User) {
          DEBUG(dbgs() << "SLP: \tvalidating user:" << **User << ".\n");

          assert((ScalarToTreeEntry.count(*User) ||
                  // It is legal to replace the reduction users by undef.
                  (RdxOps && RdxOps->count(*User))) &&
                 "Replacing out-of-tree value with undef");
        }
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      cast<Instruction>(Scalar)->eraseFromParent();
    }
  }

  for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
    BlocksNumbers[it].forget();
  }
  Builder.ClearInsertionPoint();

  return VectorizableTree[0].VectorizedValue;
}

class DTCmp {
  const DominatorTree *DT;

public:
  DTCmp(const DominatorTree *DT) : DT(DT) {}
  bool operator()(const BasicBlock *A, const BasicBlock *B) const {
    return DT->properlyDominates(A, B);
  }
};
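// A sketch of the cleanup below: an insertelement chain created inside a
// loop whose vector and inserted scalar are both defined outside the loop
// is hoisted into the loop preheader (LICM); then, walking the CSE blocks
// in dominance order, any insert/extract that is identical to one in a
// dominating block is replaced by it and erased.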
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined in this basic block then we can't
    // hoist this instruction.
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  SmallVector<BasicBlock *, 8> CSEWorkList(CSEBlocks.begin(), CSEBlocks.end());
  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), DTCmp(DT));

  // Perform O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (SmallVectorImpl<BasicBlock *>::iterator I = CSEWorkList.begin(),
       E = CSEWorkList.end(); I != E; ++I) {
    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *llvm::prior(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = *I;
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = it++;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(),
           ve = Visited.end(); v != ve; ++v) {
        if (In->isIdenticalTo(*v) &&
            DT->dominates((*v)->getParent(), In->getParent())) {
          In->replaceAllUsesWith(*v);
          In->eraseFromParent();
          In = 0;
          break;
        }
      }
      if (In) {
        assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end());
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}

/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;

  virtual bool runOnFunction(Function &F) {
    SE = &getAnalysis<ScalarEvolution>();
    DL = getAnalysisIfAvailable<DataLayout>();
    TTI = &getAnalysis<TargetTransformInfo>();
    AA = &getAnalysis<AliasAnalysis>();
    LI = &getAnalysis<LoopInfo>();
    DT = &getAnalysis<DominatorTree>();

    StoreRefs.clear();
    bool Changed = false;

    // If the target claims to have no vector registers don't attempt
    // vectorization.
    if (!TTI->getNumberOfRegisters(true))
      return false;

    // Must have DataLayout. We can't require it because some tests run w/o
    // triple.
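    // (Without DataLayout we cannot query type sizes, e.g. the
    // DL->getTypeSizeInBits calls below, so bail out conservatively.)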
    if (!DL)
      return false;

    // Don't vectorize when the attribute NoImplicitFloat is used.
    if (F.hasFnAttribute(Attribute::NoImplicitFloat))
      return false;

    DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

    // Use the bottom up SLP vectorizer to construct chains that start with
    // the store instructions.
    BoUpSLP R(&F, SE, DL, TTI, AA, LI, DT);

    // Scan the blocks in the function in post order.
    for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()),
         e = po_end(&F.getEntryBlock()); it != e; ++it) {
      BasicBlock *BB = *it;

      // Vectorize trees that end at stores.
      if (unsigned count = collectStores(BB, R)) {
        (void)count;
        DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
        Changed |= vectorizeStoreChains(R);
      }

      // Vectorize trees that end at reductions.
      Changed |= vectorizeChainsInBlock(BB, R);
    }

    if (Changed) {
      R.optimizeGatherSequence();
      DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
      DEBUG(verifyFunction(F));
    }
    return Changed;
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<TargetTransformInfo>();
    AU.addRequired<LoopInfo>();
    AU.addRequired<DominatorTree>();
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTree>();
    AU.setPreservesCFG();
  }

private:

  /// \brief Collect memory references and sort them according to their base
  /// object. We sort the stores by their base objects to reduce the cost of
  /// the quadratic search on the stores. TODO: We can further reduce this
  /// cost if we flush the chain creation every time we run into a memory
  /// barrier.
  unsigned collectStores(BasicBlock *BB, BoUpSLP &R);

  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
  bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);

  /// \brief Try to vectorize a list of operands.
  /// \returns true if a value was vectorized.
  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R);

  /// \brief Try to vectorize a chain that may start at the operands of \p V.
  bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);

  /// \brief Vectorize the stores that were collected in StoreRefs.
  bool vectorizeStoreChains(BoUpSLP &R);

  /// \brief Scan the basic block and look for patterns that are likely to
  /// start a vectorization chain.
  bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);

  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
                           BoUpSLP &R);

  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
                       BoUpSLP &R);
private:
  StoreListMap StoreRefs;
};

/// \brief Check that the Values in the slice of the VL array still exist in
/// the WeakVH array.
/// Vectorization of part of the VL array may cause later values in the VL
/// array to become invalid. We track when this has happened in the WeakVH
/// array.
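/// For example (a hypothetical scenario): vectorizing stores [0..3] of an
/// eight-store chain may erase a scalar that stores [4..7] still reference;
/// the corresponding WeakVH slot then nulls out and no longer matches VL.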
static bool hasValueBeenRAUWed(ArrayRef<Value *> &VL,
                               SmallVectorImpl<WeakVH> &VH,
                               unsigned SliceBegin,
                               unsigned SliceSize) {
  for (unsigned i = SliceBegin; i < SliceBegin + SliceSize; ++i)
    if (VH[i] != VL[i])
      return true;

  return false;
}

bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                        int CostThreshold, BoUpSLP &R) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
               << "\n");
  Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
  unsigned Sz = DL->getTypeSizeInBits(StoreTy);
  unsigned VF = MinVecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
                 << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < CostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                    int costThreshold, BoUpSLP &R) {
  SetVector<Value *> Heads, Tails;
  SmallDenseMap<Value *, Value *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store
  // twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    for (unsigned j = 0; j < e; ++j) {
      if (i == j)
        continue;

      if (R.isConsecutiveAccess(Stores[i], Stores[j])) {
        Tails.insert(Stores[j]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[j];
      }
    }
  }

  // For stores that start but don't end a link in the chain:
  for (SetVector<Value *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and
    // try to vectorize it.
    BoUpSLP::ValueList Operands;
    Value *I = *it;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (VectorizedStores.count(I))
        break;
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R);

    // Mark the vectorized stores so that we don't vectorize them again.
    if (Vectorized)
      VectorizedStores.insert(Operands.begin(), Operands.end());
    Changed |= Vectorized;
  }

  return Changed;
}

unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
  unsigned count = 0;
  StoreRefs.clear();
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    StoreInst *SI = dyn_cast<StoreInst>(it);
    if (!SI)
      continue;

    // Don't touch volatile stores.
    if (!SI->isSimple())
      continue;

    // Check that the pointer points to scalars.
    Type *Ty = SI->getValueOperand()->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return 0;

    // Find the base pointer.
    Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);

    // Save the store locations.
    StoreRefs[Ptr].push_back(SI);
    count++;
  }
  return count;
}

bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R);
}

bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size()
               << ".\n");

  // Check that all of the parts are scalar instructions of the same type.
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();

  Type *Ty0 = I0->getType();
  unsigned Sz = DL->getTypeSizeInBits(Ty0);
  unsigned VF = MinVecRegSize / Sz;

  for (int i = 0, e = VL.size(); i < e; ++i) {
    Type *Ty = VL[i]->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return false;
    Instruction *Inst = dyn_cast<Instruction>(VL[i]);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());

  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    unsigned OpsWidth = 0;

    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;

    if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
                 << "\n");
    ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);

    R.buildTree(Ops);
    int Cost = R.getTreeCost();

    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
  if (!V)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
    return true;

  BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
  BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
  // Try to skip B.
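  // For example (illustrative): for V = A + (B0 + B1), where the inner add B
  // has a single use, also try the pairs (A, B0) and (A, B1) as seeds; B is
  // moved before V so the resulting tree stays schedulable.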
  if (B && B->hasOneUse()) {
    BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (tryToVectorizePair(A, B0, R)) {
      B->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A, B1, R)) {
      B->moveBefore(V);
      return true;
    }
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (tryToVectorizePair(A0, B, R)) {
      A->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A1, B, R)) {
      A->moveBefore(V);
      return true;
    }
  }
  return false;
}

/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add
/// and fadd) that has operations that can be put into a vector as its leaf.
/// For example, this tree:
///
///  mul mul mul mul
///   \  /    \  /
///    +       +
///     \     /
///        +
///
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary
/// operation feeding a phi.
///    ...
///    \  /
///     +
///     |
///  phi +=
///
///  Or:
///    ...
///    \  /
///     +
///     |
///   *p =
///
class HorizontalReduction {
  SmallPtrSet<Value *, 16> ReductionOps;
  SmallVector<Value *, 32> ReducedVals;

  BinaryOperator *ReductionRoot;
  PHINode *ReductionPHI;

  /// The opcode of the reduction.
  unsigned ReductionOpcode;
  /// The opcode of the values we perform a reduction on.
  unsigned ReducedValueOpcode;
  /// The width of one full horizontal reduction operation.
  unsigned ReduxWidth;
  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
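  /// For a 4-wide add (illustrative): pairwise computes (a0+a1, a2+a3) via
  /// the masks <0,2> and <1,3>; splitting computes (a0+a2, a1+a3) by adding
  /// the upper half <2,3,undef,undef> to the lower half.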
  bool IsPairwiseReduction;

public:
  HorizontalReduction()
    : ReductionRoot(0), ReductionPHI(0), ReductionOpcode(0),
      ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B,
                                 DataLayout *DL) {
    assert((!Phi ||
            std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
           "This phi needs to use the binary operator");

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (B->getOperand(0) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(1));
      } else if (B->getOperand(1) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(0));
      }
    }

    if (!B)
      return false;

    Type *Ty = B->getType();
    if (Ty->isVectorTy())
      return false;

    ReductionOpcode = B->getOpcode();
    ReducedValueOpcode = 0;
    ReduxWidth = MinVecRegSize / DL->getTypeSizeInBits(Ty);
    ReductionRoot = B;
    ReductionPHI = Phi;

    if (ReduxWidth < 4)
      return false;

    // We currently only support adds.
    if (ReductionOpcode != Instruction::Add &&
        ReductionOpcode != Instruction::FAdd)
      return false;

    // Post order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, 0));
    while (!Stack.empty()) {
      BinaryOperator *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;

      // Only handle trees in the current basic block.
      if (TreeN->getParent() != B->getParent())
        return false;

      // Each tree node needs to have one user except for the ultimate
      // reduction.
      if (!TreeN->hasOneUse() && TreeN != B)
        return false;

      // Postorder visit.
      if (EdgeToVisit == 2 || IsReducedValue) {
        if (IsReducedValue) {
          // Make sure that the opcodes of the operations that we are going to
          // reduce match.
          if (!ReducedValueOpcode)
            ReducedValueOpcode = TreeN->getOpcode();
          else if (ReducedValueOpcode != TreeN->getOpcode())
            return false;
          ReducedVals.push_back(TreeN);
        } else {
          // We need to be able to reassociate the adds.
          if (!TreeN->isAssociative())
            return false;
          ReductionOps.insert(TreeN);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
      BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV);
      if (Next)
        Stack.push_back(std::make_pair(Next, 0));
      else if (NextV != Phi)
        return false;
    }
    return true;
  }

  /// \brief Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
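  /// For example (assuming ReduxWidth == 4): sixteen matched "mul" leaves are
  /// vectorized four at a time; each vector subtree is collapsed to a scalar
  /// by emitReduction, and the scalars are chained with ReductionOpcode.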
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < ReduxWidth)
      return false;

    Value *VectorizedTree = 0;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setUnsafeAlgebra();
    Builder.SetFastMathFlags(Unsafe);
    unsigned i = 0;

    for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
      ArrayRef<Value *> ValsToReduce(&ReducedVals[i], ReduxWidth);
      V.buildTree(ValsToReduce, &ReductionOps);

      // Estimate cost.
      int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
      if (Cost >= -SLPCostThreshold)
        break;

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
                   << ". (HorRdx)\n");

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree();

      // Emit a reduction.
      Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedSubTree, "bin.rdx");
      } else
        VectorizedTree = ReducedSubTree;
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        Builder.SetCurrentDebugLocation(
            cast<Instruction>(ReducedVals[i])->getDebugLoc());
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedVals[i]);
      }
      // Update users.
      if (ReductionPHI) {
        assert(ReductionRoot != NULL && "Need a reduction operation");
        ReductionRoot->setOperand(0, VectorizedTree);
        ReductionRoot->setOperand(1, ReductionPHI);
      } else
        ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != 0;
  }

private:

  /// \brief Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
    int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;

    int ScalarReduxCost =
        ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
                 << " for reduction that starts with " << *FirstReducedVal
                 << " (It is a "
                 << (IsPairwiseReduction ? "pairwise" : "splitting")
                 << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }

  static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
                            Value *R, const Twine &Name = "") {
    if (Opcode == Instruction::FAdd)
      return Builder.CreateFAdd(L, R, Name);
    return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
  }

  /// \brief Emit a horizontal reduction of the vectorized value.
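  /// For a splitting reduction of <4 x float> (illustrative IR):
  ///   %rdx.shuf = shufflevector <4 x float> %v, <4 x float> undef,
  ///               <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  ///   %bin.rdx = fadd <4 x float> %v, %rdx.shuf
  /// followed by one more round with mask <1, undef, undef, undef>, and
  /// finally an extractelement of lane 0.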
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    Instruction *ValToReduce = dyn_cast<Instruction>(VectorizedValue);
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    Value *TmpVec = ValToReduce;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      if (IsPairwiseReduction) {
        Value *LeftMask =
            createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
        Value *RightMask =
            createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

        Value *LeftShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), LeftMask,
            "rdx.shuf.l");
        Value *RightShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
            "rdx.shuf.r");
        TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
                             "bin.rdx");
      } else {
        Value *UpperHalf =
            createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
        TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx");
      }
    }

    // The result is in the first element of the vector.
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};

/// \brief Recognize construction of vectors like
///  %ra = insertelement <4 x float> undef, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
///
/// Returns true if it matches.
static bool findBuildVector(InsertElementInst *IE,
                            SmallVectorImpl<Value *> &Ops) {
  if (!isa<UndefValue>(IE->getOperand(0)))
    return false;

  while (true) {
    Ops.push_back(IE->getOperand(1));

    if (IE->use_empty())
      return false;

    InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->use_back());
    if (!NextUse)
      return true;

    // If this isn't the final use, make sure the next insertelement is the
    // only use. It's OK if the final constructed vector is used multiple
    // times.
    if (!IE->hasOneUse())
      return false;

    IE = NextUse;
  }

  return false;
}

static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}

bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
         ++instr) {
      PHINode *P = dyn_cast<PHINode>(instr);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
         E = Incoming.end(); IncIt != E;) {

      // Look for the next elements with the same type.
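      // For example (illustrative): PHIs sorted as [i32 %p1, i32 %p2,
      // float %f1, float %f2] yield two candidate lists, one per type.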
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs ("
                   << NumElts << ")\n");
      if (NumElts > 1 &&
          tryToVectorizeList(ArrayRef<Value *>(IncIt, NumElts), R)) {
        // Success: start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // We may go through BB multiple times so skip the ones we have already
    // checked.
    if (!VisitedInstrs.insert(it))
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;
      Value *Rdx =
          (P->getIncomingBlock(0) == BB
               ? (P->getIncomingValue(0))
               : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1) : 0));
      // Check if this is a Binary Operator.
      BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
      if (!BI)
        continue;

      // Try to match and vectorize a horizontal reduction.
      HorizontalReduction HorRdx;
      if (ShouldVectorizeHor &&
          HorRdx.matchAssociativeReduction(P, BI, DL) &&
          HorRdx.tryToReduce(R, TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      Value *Inst = BI->getOperand(0);
      if (Inst == P)
        Inst = BI->getOperand(1);

      if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may have become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      continue;
    }

    // Try to vectorize horizontal reductions feeding into a store.
    if (ShouldStartVectorizeHorAtStore)
      if (StoreInst *SI = dyn_cast<StoreInst>(it))
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(SI->getValueOperand())) {
          HorizontalReduction HorRdx;
          if (((HorRdx.matchAssociativeReduction(0, BinOp, DL) &&
                HorRdx.tryToReduce(R, TTI)) ||
               tryToVectorize(BinOp, R))) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize trees that start at compare instructions.
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may have become invalid.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int i = 0; i < 2; ++i) {
        if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
          if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
            Changed = true;
            // We would like to start over since some instructions are deleted
            // and the iterator may have become invalid.
            it = BB->begin();
            e = BB->end();
          }
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
    if (InsertElementInst *IE = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 8> Ops;
      if (!findBuildVector(IE, Ops))
        continue;

      if (tryToVectorizeList(Ops, R)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
       it != e; ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      ArrayRef<StoreInst *> Chunk(&it->second[CI], Len);
      Changed |= vectorizeStores(Chunk, -SLPCostThreshold, R);
    }
  }
  return Changed;
}

} // end anonymous namespace

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
}
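
// Illustrative usage (a minimal sketch, assuming the legacy pass manager of
// this LLVM revision; the pass-manager setup lives outside this file):
//
//   #include "llvm/PassManager.h"
//   #include "llvm/Transforms/Vectorize.h"
//
//   void addSLP(llvm::PassManager &PM) {
//     PM.add(llvm::createSLPVectorizerPass()); // schedule the SLP vectorizer
//   }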