//===-- LoopIdiomRecognize.cpp - Loop idiom recognition -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases where this kicks in, it can be a significant
// performance win.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop, ctlz, cttz
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set. It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
using namespace llvm;

#define DEBUG_TYPE "loop-idiom"

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");

namespace {

class LoopIdiomRecognize {
  Loop *CurLoop;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;
  const DataLayout *DL;

public:
  explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
                              LoopInfo *LI, ScalarEvolution *SE,
                              TargetLibraryInfo *TLI,
                              const TargetTransformInfo *TTI,
                              const DataLayout *DL)
      : CurLoop(nullptr), AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI),
        DL(DL) {}

  bool runOnLoop(Loop *L);

private:
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;
  StoreListMap StoreRefsForMemset;
  StoreListMap StoreRefsForMemsetPattern;
  StoreList StoreRefsForMemcpy;
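  /// Availability of the library routines this pass can emit, as reported by
  /// TargetLibraryInfo; initialized once per loop in runOnLoop().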
  bool HasMemset;
  bool HasMemsetPattern;
  bool HasMemcpy;

  /// \name Countable Loop Idiom Handling
  /// @{

  bool runOnCountableLoop();
  bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                      SmallVectorImpl<BasicBlock *> &ExitBlocks);

  void collectStores(BasicBlock *BB);
  bool isLegalStore(StoreInst *SI, bool &ForMemset, bool &ForMemsetPattern,
                    bool &ForMemcpy);
  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
                         bool ForMemset);
  bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

  bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                               unsigned StoreAlignment, Value *StoredVal,
                               Instruction *TheStore,
                               SmallPtrSetImpl<Instruction *> &Stores,
                               const SCEVAddRecExpr *Ev, const SCEV *BECount,
                               bool NegStride);
  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);

  /// @}
  /// \name Noncountable Loop Idiom Handling
  /// @{

  bool runOnNoncountableLoop();

  bool recognizePopcount();
  void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                               PHINode *CntPhi, Value *Var);

  /// @}
};

class LoopIdiomRecognizeLegacyPass : public LoopPass {
public:
  static char ID;
  explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
    initializeLoopIdiomRecognizeLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent());
    const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();

    LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL);
    return LIR.runOnLoop(L);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG.
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }
};
} // End anonymous namespace.

PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L,
                                              AnalysisManager<Loop> &AM) {
  const auto &FAM =
      AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
  Function *F = L.getHeader()->getParent();
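  // From here the new-PM entry point mirrors the legacy pass above: gather
  // the function-level analyses, then run the shared implementation.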
  // Use getCachedResult because a Loop pass cannot trigger a function
  // analysis.
  auto *AA = FAM.getCachedResult<AAManager>(*F);
  auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(*F);
  auto *LI = FAM.getCachedResult<LoopAnalysis>(*F);
  auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(*F);
  auto *TLI = FAM.getCachedResult<TargetLibraryAnalysis>(*F);
  const auto *TTI = FAM.getCachedResult<TargetIRAnalysis>(*F);
  const auto *DL = &L.getHeader()->getModule()->getDataLayout();
  assert((AA && DT && LI && SE && TLI && TTI && DL) &&
         "Analyses for Loop Idiom Recognition not available");

  LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL);
  if (!LIR.runOnLoop(&L))
    return PreservedAnalyses::all();

  return getLoopPassPreservedAnalyses();
}

char LoopIdiomRecognizeLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                      "Recognize loop idioms", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                    "Recognize loop idioms", false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }

static void deleteDeadInstruction(Instruction *I) {
  I->replaceAllUsesWith(UndefValue::get(I->getType()));
  I->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//
//          Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//

bool LoopIdiomRecognize::runOnLoop(Loop *L) {
  CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it; just give up.
  if (!L->getLoopPreheader())
    return false;

  // Disable loop idiom recognition if the function's name is a common idiom.
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy")
    return false;

  HasMemset = TLI->has(LibFunc::memset);
  HasMemsetPattern = TLI->has(LibFunc::memset_pattern16);
  HasMemcpy = TLI->has(LibFunc::memcpy);

  if (HasMemset || HasMemsetPattern || HasMemcpy)
    if (SE->hasLoopInvariantBackedgeTakenCount(L))
      return runOnCountableLoop();

  return runOnNoncountableLoop();
}

bool LoopIdiomRecognize::runOnCountableLoop() {
  const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
  assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable "
         "backedge-taken count");

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getAPInt() == 0)
      return false;

  SmallVector<BasicBlock *, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  DEBUG(dbgs() << "loop-idiom Scanning: F["
               << CurLoop->getHeader()->getParent()->getName() << "] Loop %"
               << CurLoop->getHeader()->getName() << "\n");

  bool MadeChange = false;

  // The following transforms hoist stores/memsets into the loop pre-header.
  // Give up if the loop has instructions that may throw.
  LoopSafetyInfo SafetyInfo;
  computeLoopSafetyInfo(&SafetyInfo, CurLoop);
  if (SafetyInfo.MayThrow)
    return MadeChange;

  // Scan all the blocks in the loop that are not in subloops.
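  // (Blocks owned by an inner loop are deliberately skipped here; they are
  // handled when this pass runs on that inner loop itself.)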
  for (auto *BB : CurLoop->getBlocks()) {
    // Ignore blocks in subloops.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
  }
  return MadeChange;
}

static unsigned getStoreSizeInBytes(StoreInst *SI, const DataLayout *DL) {
  uint64_t SizeInBits = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
  assert(((SizeInBits & 7) || (SizeInBits >> 32) == 0) &&
         "Don't overflow unsigned.");
  return (unsigned)SizeInBits >> 3;
}

static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
  const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
  return ConstStride->getAPInt();
}

/// getMemSetPatternValue - If a strided store of the specified value is safe
/// to turn into a memset_pattern16, return a ConstantArray of 16 bytes that
/// should be passed in. Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL->getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL->isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16 bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}

bool LoopIdiomRecognize::isLegalStore(StoreInst *SI, bool &ForMemset,
                                      bool &ForMemsetPattern, bool &ForMemcpy) {
  // Don't touch volatile stores.
  if (!SI->isSimple())
    return false;

  // Avoid merging nontemporal stores.
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores that are so large that they overflow an unsigned.
  uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return false;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return false;
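  // For example (illustrative), the store pointer in
  //   for (i = 0; i != n; ++i) A[i] = 0;
  // with i32 elements has the addrec {&A[0],+,4} on this loop: a base of
  // &A[0] and a constant stride of 4 bytes per iteration.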
  // Check to see if we have a constant stride.
  if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
    return false;

  // See if the store can be turned into a memset.

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored. A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = nullptr;

  // If we're allowed to form a memset, and the stored value would be
  // acceptable for memset, use it.
  if (HasMemset && SplatValue &&
      // Verify that the stored value is loop invariant. If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // It looks like we can use SplatValue.
    ForMemset = true;
    return true;
  } else if (HasMemsetPattern &&
             // Don't create memset_pattern16s with address spaces.
             StorePtr->getType()->getPointerAddressSpace() == 0 &&
             (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
    // It looks like we can use PatternValue!
    ForMemsetPattern = true;
    return true;
  }

  // Otherwise, see if the store can be turned into a memcpy.
  if (HasMemcpy) {
    // Check to see if the stride matches the size of the store. If so, then
    // we know that every byte is touched in the loop.
    APInt Stride = getStoreStride(StoreEv);
    unsigned StoreSize = getStoreSizeInBytes(SI, DL);
    if (StoreSize != Stride && StoreSize != -Stride)
      return false;

    // The store must be feeding a non-volatile load.
    LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
    if (!LI || !LI->isSimple())
      return false;

    // See if the pointer expression is an AddRec like {base,+,1} on the
    // current loop, which indicates a strided load. If we have something
    // else, it's a random load we can't handle.
    const SCEVAddRecExpr *LoadEv =
        dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
    if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
      return false;

    // The store and load must share the same stride.
    if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
      return false;

    // Success. This store can be converted into a memcpy.
    ForMemcpy = true;
    return true;
  }
  // This store can't be transformed into a memset/memcpy.
  return false;
}

void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
  StoreRefsForMemset.clear();
  StoreRefsForMemsetPattern.clear();
  StoreRefsForMemcpy.clear();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    bool ForMemset = false;
    bool ForMemsetPattern = false;
    bool ForMemcpy = false;
    // Make sure this is a strided store with a constant stride.
    if (!isLegalStore(SI, ForMemset, ForMemsetPattern, ForMemcpy))
      continue;
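    // isLegalStore() classifies each store into at most one bucket:
    // splat-byte stores are memset candidates, other constant patterns are
    // memset_pattern16 candidates, and load-fed stores are memcpy candidates.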
    // Save the store locations.
    if (ForMemset) {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemset[Ptr].push_back(SI);
    } else if (ForMemsetPattern) {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemsetPattern[Ptr].push_back(SI);
    } else if (ForMemcpy)
      StoreRefsForMemcpy.push_back(SI);
  }
}

/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
    BasicBlock *BB, const SCEV *BECount,
    SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop. For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop. Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  // Look for store instructions, which may be optimized to memset/memcpy.
  collectStores(BB);

  // Look for a single store or sets of stores with a common base, which can be
  // optimized into a memset (memset_pattern). The latter most commonly happens
  // with structs and hand-unrolled loops.
  for (auto &SL : StoreRefsForMemset)
    MadeChange |= processLoopStores(SL.second, BECount, true);

  for (auto &SL : StoreRefsForMemsetPattern)
    MadeChange |= processLoopStores(SL.second, BECount, false);

  // Optimize the store into a memcpy, if it feeds a similarly strided load.
  for (auto &SI : StoreRefsForMemcpy)
    MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);

  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;
    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakVH InstPtr(&*I);
      if (!processLoopMemSet(MSI, BECount))
        continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (!InstPtr)
        I = BB->begin();
      continue;
    }
  }

  return MadeChange;
}

/// processLoopStores - See if the given set of stores can be promoted to a
/// memset.
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
                                           const SCEV *BECount,
                                           bool ForMemset) {
  // Try to find consecutive stores that can be transformed into memsets.
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = SL.size(); i < e; ++i) {
    assert(SL[i]->isSimple() && "Expected only non-volatile stores.");

    Value *FirstStoredVal = SL[i]->getValueOperand();
    Value *FirstStorePtr = SL[i]->getPointerOperand();
    const SCEVAddRecExpr *FirstStoreEv =
        cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
    APInt FirstStride = getStoreStride(FirstStoreEv);
    unsigned FirstStoreSize = getStoreSizeInBytes(SL[i], DL);
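    // A store covers its range on its own only when its stride equals its
    // size. In a hand-unrolled loop such as (illustrative)
    //   for (i = 0; i < n; i += 2) { A[i] = 0; A[i+1] = 0; }
    // each i32 store has size 4 but stride 8, so it must be chained with its
    // neighbor below before every byte is known to be written.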
    // See if we can optimize just this store in isolation.
    if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
      Heads.insert(SL[i]);
      continue;
    }

    Value *FirstSplatValue = nullptr;
    Constant *FirstPatternValue = nullptr;

    if (ForMemset)
      FirstSplatValue = isBytewiseValue(FirstStoredVal);
    else
      FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);

    assert((FirstSplatValue || FirstPatternValue) &&
           "Expected either splat value or pattern value.");

    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because pairing with an immediately succeeding or preceding
    // candidate usually creates the best chance of finding a memset
    // opportunity.
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);

    for (auto &k : IndexQueue) {
      assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
      Value *SecondStorePtr = SL[k]->getPointerOperand();
      const SCEVAddRecExpr *SecondStoreEv =
          cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
      APInt SecondStride = getStoreStride(SecondStoreEv);

      if (FirstStride != SecondStride)
        continue;

      Value *SecondStoredVal = SL[k]->getValueOperand();
      Value *SecondSplatValue = nullptr;
      Constant *SecondPatternValue = nullptr;

      if (ForMemset)
        SecondSplatValue = isBytewiseValue(SecondStoredVal);
      else
        SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);

      assert((SecondSplatValue || SecondPatternValue) &&
             "Expected either splat value or pattern value.");

      if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
        if (ForMemset) {
          if (FirstSplatValue != SecondSplatValue)
            continue;
        } else {
          if (FirstPatternValue != SecondPatternValue)
            continue;
        }
        Tails.insert(SL[k]);
        Heads.insert(SL[i]);
        ConsecutiveChain[SL[i]] = SL[k];
        break;
      }
    }
  }

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we transformed so that we don't visit the same store
  // twice.
  SmallPtrSet<Value *, 16> TransformedStores;
  bool Changed = false;

  // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and
    // try to transform it.
    SmallPtrSet<Instruction *, 8> AdjacentStores;
    StoreInst *I = *it;

    StoreInst *HeadStore = I;
    unsigned StoreSize = 0;

    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (TransformedStores.count(I))
        break;
      AdjacentStores.insert(I);

      StoreSize += getStoreSizeInBytes(I, DL);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    Value *StoredVal = HeadStore->getValueOperand();
    Value *StorePtr = HeadStore->getPointerOperand();
    const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    APInt Stride = getStoreStride(StoreEv);
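    // In the two-stores-per-iteration example above, the chain accumulates
    // StoreSize == 8 against a stride of 8, so the check below succeeds and
    // every byte of the range is known to be written.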
    // Check to see if the stride matches the size of the stores. If so, then
    // we know that every byte is touched in the loop.
    if (StoreSize != Stride && StoreSize != -Stride)
      continue;

    bool NegStride = StoreSize == -Stride;

    if (processLoopStridedStore(StorePtr, StoreSize, HeadStore->getAlignment(),
                                StoredVal, HeadStore, AdjacentStores, StoreEv,
                                BECount, NegStride)) {
      TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
      Changed = true;
    }
  }

  return Changed;
}

/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
    return false;

  // If we're not allowed to hack on memset, we fail.
  if (!HasMemset)
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset. If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
  if (!ConstStride)
    return false;

  APInt Stride = ConstStride->getAPInt();
  if (SizeInBytes != Stride && SizeInBytes != -Stride)
    return false;

  // Verify that the memset value is loop invariant. If not, we can't promote
  // the memset.
  Value *SplatValue = MSI->getValue();
  if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
    return false;

  SmallPtrSet<Instruction *, 1> MSIs;
  MSIs.insert(MSI);
  bool NegStride = SizeInBytes == -Stride;
  return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
                                 MSI->getAlignment(), SplatValue, MSI, MSIs, Ev,
                                 BECount, NegStride);
}

/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access. The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
                      const SCEV *BECount, unsigned StoreSize,
                      AliasAnalysis &AA,
                      SmallPtrSetImpl<Instruction *> &IgnoredStores) {
  // Get the location that may be stored across the loop. Since the access is
  // strided positively through memory, we say that the modified location
  // starts at the pointer and has infinite size.
  uint64_t AccessSize = MemoryLocation::UnknownSize;

  // If the loop iterates a fixed number of times, we can refine the access
  // size to be exactly the size of the memset, which is (BECount+1)*StoreSize.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
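  // For instance (illustrative), BECount = 99 with StoreSize = 4 refines the
  // checked region from "unknown size" down to exactly (99+1)*4 = 400 bytes.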
  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store. A store to &A[i] of size 100 will always return
  // MayAlias with a store to &A[100]; we need StoreLoc to be "A" with size
  // 100, which will then no-alias a store to &A[100].
  MemoryLocation StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (Instruction &I : **BI)
      if (IgnoredStores.count(&I) == 0 &&
          (AA.getModRefInfo(&I, StoreLoc) & Access))
        return true;

  return false;
}

// If we have a negative stride, Start refers to the end of the memory
// location we're trying to memset. Therefore, we need to recompute the base
// pointer, which is just Start - BECount*Size.
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
                                        Type *IntPtr, unsigned StoreSize,
                                        ScalarEvolution *SE) {
  const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  if (StoreSize != 1)
    Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
                           SCEV::FlagNUW);
  return SE->getMinusSCEV(Start, Index);
}

/// processLoopStridedStore - We see a strided store of some value. If we can
/// transform this into a memset or memset_pattern in the loop preheader, do
/// so.
bool LoopIdiomRecognize::processLoopStridedStore(
    Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
    Value *StoredVal, Instruction *TheStore,
    SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
    const SCEV *BECount, bool NegStride) {
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = nullptr;

  if (!SplatValue)
    PatternValue = getMemSetPatternValue(StoredVal, DL);

  assert((SplatValue || PatternValue) &&
         "Expected either splat value or pattern value.");

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate
  // the header. This allows us to insert code for them in the preheader.
  unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
  Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);

  const SCEV *Start = Ev->getStart();
  // Handle negative strided loops.
  if (NegStride)
    Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a splattable value. We can turn
  // this into a memset in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location. Check for any overlap by generating the
  // base pointer and checking the region.
  Value *BasePtr =
      Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
  if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize,
                            *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
    return false;
  }

  // Okay, everything looks good, insert the memset.
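  // As a worked example (illustrative): a loop storing an i32 on each of its
  // n iterations has BECount = n-1, so the length below expands to
  // (n-1+1)*4 = 4*n bytes.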
  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS =
      SE->getAddExpr(BECount, SE->getOne(IntPtr), SCEV::FlagNUW);
  if (StoreSize != 1) {
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);
  }

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  CallInst *NewCall;
  if (SplatValue) {
    NewCall =
        Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
  } else {
    // Everything is emitted in the default address space.
    Type *Int8PtrTy = DestInt8PtrTy;

    Module *M = TheStore->getModule();
    Value *MSP =
        M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
                               Int8PtrTy, Int8PtrTy, IntPtr, (void *)nullptr);
    inferLibFuncAttributes(*M->getFunction("memset_pattern16"), *TLI);

    // Otherwise we should form a memset_pattern16. PatternValue is known to
    // be a constant array of 16 bytes. Plop the value into a mergable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::PrivateLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
    GV->setAlignment(16);
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
    NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
  }

  DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
               << "    from store to: " << *Ev << " at: " << *TheStore << "\n");
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  // Okay, the memset has been formed. Zap the original store and anything
  // that feeds into it.
  for (auto *I : Stores)
    deleteDeadInstruction(I);
  ++NumMemSet;
  return true;
}

/// If the stored value is a strided load in the same loop with the same stride
/// this may be transformable into a memcpy. This kicks in for stuff like
///   for (i) A[i] = B[i];
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                    const SCEV *BECount) {
  assert(SI->isSimple() && "Expected only non-volatile stores.");

  Value *StorePtr = SI->getPointerOperand();
  const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  APInt Stride = getStoreStride(StoreEv);
  unsigned StoreSize = getStoreSizeInBytes(SI, DL);
  bool NegStride = StoreSize == -Stride;

  // The store must be feeding a non-volatile load.
  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
  assert(LI->isSimple() && "Expected only non-volatile loads.");

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided load. If we have something else, it's a
  // random load we can't handle.
  const SCEVAddRecExpr *LoadEv =
      cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate
  // the header. This allows us to insert code for them in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  const SCEV *StrStart = StoreEv->getStart();
  unsigned StrAS = SI->getPointerAddressSpace();
  Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);
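  // For a backward copy such as (illustrative)
  //   for (i = n - 1; i >= 0; --i) A[i] = B[i];
  // the addrec starts at the highest address, so the lowest address,
  // Start - BECount*StoreSize, is recomputed below before forming the call.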
  // Handle negative strided loops.
  if (NegStride)
    StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a loaded value. We can turn
  // this into a memcpy in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to. This includes the load that
  // feeds the stores. Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr = Expander.expandCodeFor(
      StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());

  SmallPtrSet<Instruction *, 1> Stores;
  Stores.insert(SI);
  if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    return false;
  }

  const SCEV *LdStart = LoadEv->getStart();
  unsigned LdAS = LI->getPointerAddressSpace();

  // Handle negative strided loops.
  if (NegStride)
    LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr = Expander.expandCodeFor(
      LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());

  if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
                            *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
    RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    return false;
  }

  // Okay, everything is safe, we can transform this!

  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);

  const SCEV *NumBytesS =
      SE->getAddExpr(BECount, SE->getOne(IntPtrTy), SCEV::FlagNUW);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize),
                               SCEV::FlagNUW);

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());

  CallInst *NewCall =
      Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
                           std::min(SI->getAlignment(), LI->getAlignment()));
  NewCall->setDebugLoc(SI->getDebugLoc());

  DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
               << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
               << "    from store ptr=" << *StoreEv << " at: " << *SI << "\n");

  // Okay, the memcpy has been formed. Zap the original store and anything
  // that feeds into it.
  deleteDeadInstruction(SI);
  ++NumMemCpy;
  return true;
}

bool LoopIdiomRecognize::runOnNoncountableLoop() {
  return recognizePopcount();
}
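// For reference (illustrative), the branch shape matchCondition below
// accepts looks like:
//   %cmp = icmp ne i32 %x, 0
//   br i1 %cmp, label %loop-entry, label %other
// or the icmp eq form with the successor edges swapped.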
/// Check if the given conditional branch is based on the comparison between
/// a variable and zero, and if the variable is non-zero, the control yields to
/// the loop entry. If the branch matches this behavior, the variable involved
/// in the comparison is returned. This function will be called to see if the
/// precondition and postcondition of the loop are in desirable form.
static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
  if (!BI || !BI->isConditional())
    return nullptr;

  ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
  if (!Cond)
    return nullptr;

  ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
  if (!CmpZero || !CmpZero->isZero())
    return nullptr;

  ICmpInst::Predicate Pred = Cond->getPredicate();
  if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
      (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
    return Cond->getOperand(0);

  return nullptr;
}

/// Return true iff the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction counting the population bits.
/// 2) \p CntPhi is set to the corresponding phi node.
/// 3) \p Var is set to the value whose population bits are being counted.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 == 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x1 = phi (x0, x2);
///       cnt1 = phi(cnt0, cnt2);
///
///       cnt2 = cnt1 + 1;
///        ...
///       x2 = x1 & (x1 - 1);
///        ...
///    } while(x != 0);
///
/// loop-exit:
/// \endcode
static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
                                Instruction *&CntInst, PHINode *&CntPhi,
                                Value *&Var) {
  // step 1: Check to see if the loop-back branch matches this pattern:
  //    "if (a != 0) goto loop-entry".
  BasicBlock *LoopEntry;
  Instruction *DefX2, *CountInst;
  Value *VarX1, *VarX0;
  PHINode *PhiX, *CountPhi;

  DefX2 = CountInst = nullptr;
  VarX1 = VarX0 = nullptr;
  PhiX = CountPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());
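  // As a concrete sketch (illustrative), the IR matched by steps 1-3 is:
  //   %x1 = phi i32 [ %x0, %preheader ], [ %x2, %body ]
  //   %dec = add i32 %x1, -1            ; or: sub i32 %x1, 1
  //   %x2 = and i32 %x1, %dec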
  // step 1: Check if the loop-back branch is in desirable form.
  {
    if (Value *T = matchCondition(
            dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
      DefX2 = dyn_cast<Instruction>(T);
    else
      return false;
  }

  // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
  {
    if (!DefX2 || DefX2->getOpcode() != Instruction::And)
      return false;

    BinaryOperator *SubOneOp;

    if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
      VarX1 = DefX2->getOperand(1);
    else {
      VarX1 = DefX2->getOperand(0);
      SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
    }
    if (!SubOneOp)
      return false;

    Instruction *SubInst = cast<Instruction>(SubOneOp);
    ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1));
    if (!Dec ||
        !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
          (SubInst->getOpcode() == Instruction::Add &&
           Dec->isAllOnesValue()))) {
      return false;
    }
  }

  // step 3: Check the recurrence of variable X
  {
    PhiX = dyn_cast<PHINode>(VarX1);
    if (!PhiX ||
        (PhiX->getOperand(0) != DefX2 && PhiX->getOperand(1) != DefX2)) {
      return false;
    }
  }

  // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
  {
    CountInst = nullptr;
    for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                              IterE = LoopEntry->end();
         Iter != IterE; Iter++) {
      Instruction *Inst = &*Iter;
      if (Inst->getOpcode() != Instruction::Add)
        continue;

      ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
      if (!Inc || !Inc->isOne())
        continue;

      PHINode *Phi = dyn_cast<PHINode>(Inst->getOperand(0));
      if (!Phi || Phi->getParent() != LoopEntry)
        continue;

      // Check if the result of the instruction is live out of the loop.
      bool LiveOutLoop = false;
      for (User *U : Inst->users()) {
        if ((cast<Instruction>(U))->getParent() != LoopEntry) {
          LiveOutLoop = true;
          break;
        }
      }

      if (LiveOutLoop) {
        CountInst = Inst;
        CountPhi = Phi;
        break;
      }
    }

    if (!CountInst)
      return false;
  }

  // step 5: check if the precondition is in this form:
  //   "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
  {
    auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
    if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
      return false;

    CntInst = CountInst;
    CntPhi = CountPhi;
    Var = T;
  }

  return true;
}

/// Recognizes a population count idiom in a non-countable loop.
///
/// If detected, transforms the relevant code to issue the popcount intrinsic
/// function call, and returns true; otherwise, returns false.
bool LoopIdiomRecognize::recognizePopcount() {
  if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
    return false;

  // Population counting is usually done with a few arithmetic instructions.
  // Such instructions can be easily "absorbed" by vacant slots in a
  // non-compact loop. Therefore, recognizing a popcount idiom only makes
  // sense in a compact loop.
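  // At the source level, the accepted shape is roughly (sketch):
  //   if (x)                                  // precondition block
  //     do { cnt++; x &= x - 1; } while (x);  // single-block loop body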
  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  BasicBlock *LoopBody = *(CurLoop->block_begin());
  if (LoopBody->size() >= 20) {
    // The loop is too big, bail out.
    return false;
  }

  // It should have a preheader containing nothing but an unconditional branch.
  BasicBlock *PH = CurLoop->getLoopPreheader();
  if (!PH)
    return false;
  if (&PH->front() != PH->getTerminator())
    return false;
  auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
  if (!EntryBI || EntryBI->isConditional())
    return false;

  // It should have a precondition block where the generated popcount
  // intrinsic function can be inserted.
  auto *PreCondBB = PH->getSinglePredecessor();
  if (!PreCondBB)
    return false;
  auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
  if (!PreCondBI || PreCondBI->isUnconditional())
    return false;

  Instruction *CntInst;
  PHINode *CntPhi;
  Value *Val;
  if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
    return false;

  transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
  return true;
}

static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                       const DebugLoc &DL) {
  Value *Ops[] = {Val};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}

void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
                                                 Instruction *CntInst,
                                                 PHINode *CntPhi, Value *Var) {
  BasicBlock *PreHead = CurLoop->getLoopPreheader();
  auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
  const DebugLoc DL = CntInst->getDebugLoc();

  // Assume that, before the transformation, the loop looks like:
  //   if (x) // the precondition
  //     do { cnt++; x &= x - 1; } while(x);

  // Step 1: Insert the ctpop instruction at the end of the precondition block.
  IRBuilder<> Builder(PreCondBr);
  Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
  {
    PopCnt = createPopcntIntrinsic(Builder, Var, DL);
    NewCount = PopCntZext =
        Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));

    if (NewCount != PopCnt)
      (cast<Instruction>(NewCount))->setDebugLoc(DL);

    // TripCnt is exactly the number of iterations the loop has.
    TripCnt = NewCount;

    // If the population counter's initial value is not zero, insert an Add
    // instruction.
    Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
    ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
    if (!InitConst || !InitConst->isZero()) {
      NewCount = Builder.CreateAdd(NewCount, CntInitVal);
      (cast<Instruction>(NewCount))->setDebugLoc(DL);
    }
  }
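  // After Step 1 the precondition block ends with, e.g. (illustrative):
  //   %popcnt = call i32 @llvm.ctpop.i32(i32 %x)
  //   %newcount = add i32 %popcnt, %cntinit  ; only when the initial counter
  //                                          ; value may be nonzero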
  // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to
  //   "if (NewCount == 0) loop-exit". Without this change, the intrinsic
  //   function would be partially dead code, and downstream passes would drag
  //   it back from the precondition block to the preheader.
  {
    ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());

    Value *Opnd0 = PopCntZext;
    Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
    if (PreCond->getOperand(0) != Var)
      std::swap(Opnd0, Opnd1);

    ICmpInst *NewPreCond = cast<ICmpInst>(
        Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
    PreCondBr->setCondition(NewPreCond);

    RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
  }

  // Step 3: Note that the population count is exactly the trip count of the
  // loop in question, which enables us to convert the loop from a noncountable
  // loop into a countable one. The benefit is twofold:
  //
  //  - If the loop only counts population, the entire loop becomes dead after
  //    the transformation. It is a lot easier to prove a countable loop dead
  //    than to prove a noncountable one. (In some C dialects, an infinite
  //    loop isn't dead even if it computes nothing useful. In general, DCE
  //    needs to prove a noncountable loop finite before safely deleting it.)
  //
  //  - If the loop also performs something else, it remains alive.
  //    Since it is transformed to countable form, it can be aggressively
  //    optimized by some optimizations which are in general not applicable
  //    to a noncountable loop.
  //
  // After this step, this loop (conceptually) would look like the following:
  //   newcnt = __builtin_ctpop(x);
  //   t = newcnt;
  //   if (x)
  //     do { cnt++; x &= x - 1; t--; } while (t > 0);
  BasicBlock *Body = *(CurLoop->block_begin());
  {
    auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
    ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
    Type *Ty = TripCnt->getType();

    PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

    Builder.SetInsertPoint(LbCond);
    Instruction *TcDec = cast<Instruction>(
        Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                          "tcdec", false, true));

    TcPhi->addIncoming(TripCnt, PreHead);
    TcPhi->addIncoming(TcDec, Body);

    CmpInst::Predicate Pred =
        (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
    LbCond->setPredicate(Pred);
    LbCond->setOperand(0, TcDec);
    LbCond->setOperand(1, ConstantInt::get(Ty, 0));
  }

  // Step 4: All the references to the original population counter outside
  // the loop are replaced with NewCount, the value returned from
  // __builtin_ctpop().
  CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 5: Forget the "non-computable" trip-count SCEV associated with the
  // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}