LICM.cpp revision ac0b6ae358944ae8b2b5a11dc08f52c3ed89f2da
1//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by the LLVM research group and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This pass performs loop invariant code motion, attempting to remove as much 11// code from the body of a loop as possible. It does this by either hoisting 12// code into the preheader block, or by sinking code to the exit blocks if it is 13// safe. This pass also promotes must-aliased memory locations in the loop to 14// live in registers, thus hoisting and sinking "invariant" loads and stores. 15// 16// This pass uses alias analysis for two purposes: 17// 18// 1. Moving loop invariant loads and calls out of loops. If we can determine 19// that a load or call inside of a loop never aliases anything stored to, 20// we can hoist it or sink it like any other instruction. 21// 2. Scalar Promotion of Memory - If there is a store instruction inside of 22// the loop, we try to move the store to happen AFTER the loop instead of 23// inside of the loop. This can only happen if a few conditions are true: 24// A. The pointer stored through is loop invariant 25// B. There are no stores or loads in the loop which _may_ alias the 26// pointer. There are no calls in the loop which mod/ref the pointer. 27// If these conditions are true, we can promote the loads and stores in the 28// loop of the pointer to use a temporary alloca'd variable. We then use 29// the mem2reg functionality to construct the appropriate SSA form for the 30// variable. 
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "licm"
#include "llvm/Transforms/Scalar.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Support/CFG.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include <algorithm>
using namespace llvm;

namespace {
  // Escape hatch: -disable-licm-promotion turns off the scalar-promotion
  // half of the pass (PromoteValuesInLoop) while keeping hoisting/sinking.
  cl::opt<bool>
  DisablePromotion("disable-licm-promotion", cl::Hidden,
                   cl::desc("Disable memory promotion in LICM pass"));

  Statistic NumSunk("licm", "Number of instructions sunk out of loop");
  Statistic NumHoisted("licm", "Number of instructions hoisted out of loop");
  Statistic NumMovedLoads("licm", "Number of load insts hoisted or sunk");
  Statistic NumMovedCalls("licm", "Number of call insts hoisted or sunk");
  Statistic NumPromoted("licm",
                        "Number of memory locations promoted to registers");

  /// LICM - Loop Invariant Code Motion.  Per-function pass that hoists
  /// invariant code into loop preheaders, sinks loop-unused code to exit
  /// blocks, and promotes must-aliased memory locations to registers.
  struct LICM : public FunctionPass {
    virtual bool runOnFunction(Function &F);

    /// This transformation requires natural loop information & requires that
    /// loop preheaders be inserted into the CFG...
    ///
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<LoopInfo>();
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();  // For scalar promotion (mem2reg)
      AU.addRequired<AliasAnalysis>();
    }

  private:
    // Various analyses that we use...
    AliasAnalysis *AA;        // Current AliasAnalysis information
    LoopInfo      *LI;        // Current LoopInfo
    DominatorTree *DT;        // Dominator Tree for the current Loop...
    DominanceFrontier *DF;    // Current Dominance Frontier

    // State that is updated as we process loops
    bool Changed;             // Set to true when we change anything.
    BasicBlock *Preheader;    // The preheader block of the current loop...
    Loop *CurLoop;            // The current loop we are working on...
    AliasSetTracker *CurAST;  // AliasSet information for the current loop...

    /// visitLoop - Hoist expressions out of the specified loop...
    ///
    void visitLoop(Loop *L, AliasSetTracker &AST);

    /// SinkRegion - Walk the specified region of the CFG (defined by all blocks
    /// dominated by the specified block, and that are in the current loop) in
    /// reverse depth first order w.r.t the DominatorTree.  This allows us to
    /// visit uses before definitions, allowing us to sink a loop body in one
    /// pass without iteration.
    ///
    void SinkRegion(DominatorTree::Node *N);

    /// HoistRegion - Walk the specified region of the CFG (defined by all
    /// blocks dominated by the specified block, and that are in the current
    /// loop) in depth first order w.r.t the DominatorTree.  This allows us to
    /// visit definitions before uses, allowing us to hoist a loop body in one
    /// pass without iteration.
    ///
    void HoistRegion(DominatorTree::Node *N);

    /// inSubLoop - Little predicate that returns true if the specified basic
    /// block is in a subloop of the current one, not the current one itself.
    ///
    bool inSubLoop(BasicBlock *BB) {
      assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
      // Only immediate subloops need to be checked: contains() is transitive.
      for (Loop::iterator I = CurLoop->begin(), E = CurLoop->end(); I != E; ++I)
        if ((*I)->contains(BB))
          return true;  // A subloop actually contains this block!
      return false;
    }

    /// isExitBlockDominatedByBlockInLoop - This method checks to see if the
    /// specified exit block of the loop is dominated by the specified block
    /// that is in the body of the loop.  We use these constraints to
    /// dramatically limit the amount of the dominator tree that needs to be
    /// searched.
    bool isExitBlockDominatedByBlockInLoop(BasicBlock *ExitBlock,
                                           BasicBlock *BlockInLoop) const {
      // If the block in the loop is the loop header, it must be dominated!
      BasicBlock *LoopHeader = CurLoop->getHeader();
      if (BlockInLoop == LoopHeader)
        return true;

      DominatorTree::Node *BlockInLoopNode = DT->getNode(BlockInLoop);
      DominatorTree::Node *IDom            = DT->getNode(ExitBlock);

      // Because the exit block is not in the loop, we know we have to get _at
      // least_ its immediate dominator.
      do {
        // Get next Immediate Dominator.
        IDom = IDom->getIDom();

        // If we have got to the header of the loop, then the instructions block
        // did not dominate the exit node, so we can't hoist it.
        if (IDom->getBlock() == LoopHeader)
          return false;

      } while (IDom != BlockInLoopNode);

      return true;
    }

    /// sink - When an instruction is found to only be used outside of the loop,
    /// this function moves it to the exit blocks and patches up SSA form as
    /// needed.
    ///
    void sink(Instruction &I);

    /// hoist - When an instruction is found to only use loop invariant operands
    /// that is safe to hoist, this instruction is called to do the dirty work.
    ///
    void hoist(Instruction &I);

    /// isSafeToExecuteUnconditionally - Only sink or hoist an instruction if it
    /// is not a trapping instruction or if it is a trapping instruction and is
    /// guaranteed to execute.
    ///
    bool isSafeToExecuteUnconditionally(Instruction &I);

    /// pointerInvalidatedByLoop - Return true if the body of this loop may
    /// store into the memory location pointed to by V.
    ///
    bool pointerInvalidatedByLoop(Value *V, unsigned Size) {
      // Check to see if any of the basic blocks in CurLoop invalidate *V.
      return CurAST->getAliasSetForPointer(V, Size).isMod();
    }

    bool canSinkOrHoistInst(Instruction &I);
    bool isLoopInvariantInst(Instruction &I);
    bool isNotUsedInLoop(Instruction &I);

    /// PromoteValuesInLoop - Look at the stores in the loop and promote as many
    /// to scalars as we can.
    ///
    void PromoteValuesInLoop();

    /// FindPromotableValuesInLoop - Check the current loop for stores to
    /// definite pointers, which are not loaded and stored through may aliases.
    /// If these are found, create an alloca for the value, add it to the
    /// PromotedValues list, and keep track of the mapping from value to
    /// alloca...
    ///
    void FindPromotableValuesInLoop(
        std::vector<std::pair<AllocaInst*, Value*> > &PromotedValues,
        std::map<Value*, AllocaInst*> &Val2AlMap);
  };

  RegisterPass<LICM> X("licm", "Loop Invariant Code Motion");
}

FunctionPass *llvm::createLICMPass() { return new LICM(); }

/// runOnFunction - For LICM, this simply traverses the loop structure of the
/// function, hoisting expressions out of loops if possible.
///
bool LICM::runOnFunction(Function &) {
  Changed = false;

  // Get our Loop and Alias Analysis information...
  LI = &getAnalysis<LoopInfo>();
  AA = &getAnalysis<AliasAnalysis>();
  DF = &getAnalysis<DominanceFrontier>();
  DT = &getAnalysis<DominatorTree>();

  // Hoist expressions out of all of the top-level loops.  Each top-level loop
  // gets a fresh AliasSetTracker scoped to this iteration.
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) {
    AliasSetTracker AST(*AA);
    visitLoop(*I, AST);
  }
  return Changed;
}


/// visitLoop - Hoist expressions out of the specified loop...
///
void LICM::visitLoop(Loop *L, AliasSetTracker &AST) {
  // Recurse through all subloops before we process this loop...
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) {
    AliasSetTracker SubAST(*AA);
    visitLoop(*I, SubAST);

    // Incorporate information about the subloops into this loop...
    AST.add(SubAST);
  }
  CurLoop = L;
  CurAST  = &AST;

  // Get the preheader block to move instructions into...
  Preheader = L->getLoopPreheader();
  assert(Preheader&&"Preheader insertion pass guarantees we have a preheader!");

  // Loop over the body of this loop, looking for calls, invokes, and stores.
  // Because subloops have already been incorporated into AST, we skip blocks in
  // subloops.
  //
  for (std::vector<BasicBlock*>::const_iterator I = L->getBlocks().begin(),
       E = L->getBlocks().end(); I != E; ++I)
    if (LI->getLoopFor(*I) == L)     // Ignore blocks in subloops...
      AST.add(**I);                  // Incorporate the specified basic block

  // We want to visit all of the instructions in this loop... that are not parts
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses.  This allows
  // us to sink instructions in one pass, without iteration.  After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  //
  SinkRegion(DT->getNode(L->getHeader()));
  HoistRegion(DT->getNode(L->getHeader()));

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can...
  if (!DisablePromotion)
    PromoteValuesInLoop();

  // Clear out loop's state information for the next iteration.
  // NOTE(review): CurAST is intentionally left pointing at the caller-owned
  // AST here; it is reset on the next visitLoop call before any use.
  CurLoop = 0;
  Preheader = 0;
}

/// SinkRegion - Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in
/// reverse depth first order w.r.t the DominatorTree.  This allows us to visit
/// uses before definitions, allowing us to sink a loop body in one pass without
/// iteration.
///
void LICM::SinkRegion(DominatorTree::Node *N) {
  assert(N != 0 && "Null dominator tree node?");
  BasicBlock *BB = N->getBlock();

  // If this subregion is not in the top level loop at all, exit.
  if (!CurLoop->contains(BB)) return;

  // We are processing blocks in reverse dfo, so process children first...
  const std::vector<DominatorTree::Node*> &Children = N->getChildren();
  for (unsigned i = 0, e = Children.size(); i != e; ++i)
    SinkRegion(Children[i]);

  // Only need to process the contents of this block if it is not part of a
  // subloop (which would already have been processed).
  if (inSubLoop(BB)) return;

  // Iterate backwards; sink() may erase or move I, so the iterator is
  // re-advanced past I before sinking.
  for (BasicBlock::iterator II = BB->end(); II != BB->begin(); ) {
    Instruction &I = *--II;

    // Check to see if we can sink this instruction to the exit blocks
    // of the loop.  We can do this if the all users of the instruction are
    // outside of the loop.  In this case, it doesn't even matter if the
    // operands of the instruction are loop invariant.
    //
    if (isNotUsedInLoop(I) && canSinkOrHoistInst(I)) {
      ++II;
      sink(I);
    }
  }
}


/// HoistRegion - Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in depth
/// first order w.r.t the DominatorTree.  This allows us to visit definitions
/// before uses, allowing us to hoist a loop body in one pass without iteration.
///
void LICM::HoistRegion(DominatorTree::Node *N) {
  assert(N != 0 && "Null dominator tree node?");
  BasicBlock *BB = N->getBlock();

  // If this subregion is not in the top level loop at all, exit.
  if (!CurLoop->contains(BB)) return;

  // Only need to process the contents of this block if it is not part of a
  // subloop (which would already have been processed).
  if (!inSubLoop(BB))
    // hoist() moves I out of this block, so advance the iterator before
    // deciding whether to hoist.
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ) {
      Instruction &I = *II++;

      // Try hoisting the instruction out to the preheader.  We can only do this
      // if all of the operands of the instruction are loop invariant and if it
      // is safe to hoist the instruction.
      //
      if (isLoopInvariantInst(I) && canSinkOrHoistInst(I) &&
          isSafeToExecuteUnconditionally(I))
        hoist(I);
    }

  const std::vector<DominatorTree::Node*> &Children = N->getChildren();
  for (unsigned i = 0, e = Children.size(); i != e; ++i)
    HoistRegion(Children[i]);
}

/// canSinkOrHoistInst - Return true if the hoister and sinker can handle this
/// instruction.
///
bool LICM::canSinkOrHoistInst(Instruction &I) {
  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (LI->isVolatile())
      return false;        // Don't hoist volatile loads!

    // Don't hoist loads which have may-aliased stores in loop.
    unsigned Size = 0;
    if (LI->getType()->isSized())
      Size = AA->getTargetData().getTypeSize(LI->getType());
    return !pointerInvalidatedByLoop(LI->getOperand(0), Size);
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Handle obvious cases efficiently.
    if (Function *Callee = CI->getCalledFunction()) {
      AliasAnalysis::ModRefBehavior Behavior =AA->getModRefBehavior(Callee, CI);
      if (Behavior == AliasAnalysis::DoesNotAccessMemory)
        return true;
      else if (Behavior == AliasAnalysis::OnlyReadsMemory) {
        // If this call only reads from memory and there are no writes to memory
        // in the loop, we can hoist or sink the call as appropriate.
        // (The iterator 'I' below shadows the Instruction parameter 'I'.)
        bool FoundMod = false;
        for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
             I != E; ++I) {
          AliasSet &AS = *I;
          if (!AS.isForwardingAliasSet() && AS.isMod()) {
            FoundMod = true;
            break;
          }
        }
        if (!FoundMod) return true;
      }
    }

    // FIXME: This should use mod/ref information to see if we can hoist or sink
    // the call.

    return false;
  }

  // Otherwise these instructions are hoistable/sinkable
  return isa<BinaryOperator>(I) || isa<ShiftInst>(I) || isa<CastInst>(I) ||
         isa<SelectInst>(I) || isa<GetElementPtrInst>(I);
}

/// isNotUsedInLoop - Return true if the only users of this instruction are
/// outside of the loop.  If this is true, we can sink the instruction to the
/// exit blocks of the loop.
///
bool LICM::isNotUsedInLoop(Instruction &I) {
  for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      // PHI node uses occur in predecessor blocks!
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (PN->getIncomingValue(i) == &I)
          if (CurLoop->contains(PN->getIncomingBlock(i)))
            return false;
    } else if (CurLoop->contains(User->getParent())) {
      return false;
    }
  }
  return true;
}


/// isLoopInvariantInst - Return true if all operands of this instruction are
/// loop invariant.  We also filter out non-hoistable instructions here just for
/// efficiency.
///
bool LICM::isLoopInvariantInst(Instruction &I) {
  // The instruction is loop invariant if all of its operands are loop-invariant
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
    if (!CurLoop->isLoopInvariant(I.getOperand(i)))
      return false;

  // If we got this far, the instruction is loop invariant!
  return true;
}

/// sink - When an instruction is found to only be used outside of the loop,
/// this function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
void LICM::sink(Instruction &I) {
  DOUT << "LICM sinking instruction: " << I;

  std::vector<BasicBlock*> ExitBlocks;
  CurLoop->getExitBlocks(ExitBlocks);

  if (isa<LoadInst>(I)) ++NumMovedLoads;
  else if (isa<CallInst>(I)) ++NumMovedCalls;
  ++NumSunk;
  Changed = true;

  // The case where there is only a single exit node of this loop is common
  // enough that we handle it as a special (more efficient) case.  It is more
  // efficient to handle because there are no PHI nodes that need to be placed.
  if (ExitBlocks.size() == 1) {
    if (!isExitBlockDominatedByBlockInLoop(ExitBlocks[0], I.getParent())) {
      // Instruction is not used, just delete it.
      CurAST->deleteValue(&I);
      if (!I.use_empty())  // If I has users in unreachable blocks, eliminate.
        I.replaceAllUsesWith(UndefValue::get(I.getType()));
      I.eraseFromParent();
    } else {
      // Move the instruction to the start of the exit block, after any PHI
      // nodes in it.
      I.removeFromParent();

      BasicBlock::iterator InsertPt = ExitBlocks[0]->begin();
      while (isa<PHINode>(InsertPt)) ++InsertPt;
      ExitBlocks[0]->getInstList().insert(InsertPt, &I);
    }
  } else if (ExitBlocks.size() == 0) {
    // The instruction is actually dead if there ARE NO exit blocks.
    CurAST->deleteValue(&I);
    if (!I.use_empty())  // If I has users in unreachable blocks, eliminate.
      I.replaceAllUsesWith(UndefValue::get(I.getType()));
    I.eraseFromParent();
  } else {
    // Otherwise, if we have multiple exits, use the PromoteMem2Reg function to
    // do all of the hard work of inserting PHI nodes as necessary.  We convert
    // the value into a stack object to get it to do this.

    // Firstly, we create a stack object to hold the value...
    // (AI stays null for void-typed instructions, which produce no value.)
    AllocaInst *AI = 0;

    if (I.getType() != Type::VoidTy)
      AI = new AllocaInst(I.getType(), 0, I.getName(),
                          I.getParent()->getParent()->front().begin());

    // Secondly, insert load instructions for each use of the instruction
    // outside of the loop.
    while (!I.use_empty()) {
      Instruction *U = cast<Instruction>(I.use_back());

      // If the user is a PHI Node, we actually have to insert load instructions
      // in all predecessor blocks, not in the PHI block itself!
      if (PHINode *UPN = dyn_cast<PHINode>(U)) {
        // Only insert into each predecessor once, so that we don't have
        // different incoming values from the same block!
        std::map<BasicBlock*, Value*> InsertedBlocks;
        for (unsigned i = 0, e = UPN->getNumIncomingValues(); i != e; ++i)
          if (UPN->getIncomingValue(i) == &I) {
            BasicBlock *Pred = UPN->getIncomingBlock(i);
            Value *&PredVal = InsertedBlocks[Pred];
            if (!PredVal) {
              // Insert a new load instruction right before the terminator in
              // the predecessor block.
              PredVal = new LoadInst(AI, "", Pred->getTerminator());
            }

            UPN->setIncomingValue(i, PredVal);
          }

      } else {
        LoadInst *L = new LoadInst(AI, "", U);
        U->replaceUsesOfWith(&I, L);
      }
    }

    // Thirdly, insert a copy of the instruction in each exit block of the loop
    // that is dominated by the instruction, storing the result into the memory
    // location.  Be careful not to insert the instruction into any particular
    // basic block more than once.
    std::set<BasicBlock*> InsertedBlocks;
    BasicBlock *InstOrigBB = I.getParent();

    for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = ExitBlocks[i];

      if (isExitBlockDominatedByBlockInLoop(ExitBlock, InstOrigBB)) {
        // If we haven't already processed this exit block, do so now.
        if (InsertedBlocks.insert(ExitBlock).second) {
          // Insert the code after the last PHI node...
          BasicBlock::iterator InsertPt = ExitBlock->begin();
          while (isa<PHINode>(InsertPt)) ++InsertPt;

          // If this is the first exit block processed, just move the original
          // instruction, otherwise clone the original instruction and insert
          // the copy.
          Instruction *New;
          if (InsertedBlocks.size() == 1) {
            I.removeFromParent();
            ExitBlock->getInstList().insert(InsertPt, &I);
            New = &I;
          } else {
            New = I.clone();
            CurAST->copyValue(&I, New);
            if (!I.getName().empty())
              New->setName(I.getName()+".le");
            ExitBlock->getInstList().insert(InsertPt, New);
          }

          // Now that we have inserted the instruction, store it into the alloca
          if (AI) new StoreInst(New, AI, InsertPt);
        }
      }
    }

    // If the instruction doesn't dominate any exit blocks, it must be dead.
    if (InsertedBlocks.empty()) {
      CurAST->deleteValue(&I);
      I.eraseFromParent();
    }

    // Finally, promote the fine value to SSA form.
    if (AI) {
      std::vector<AllocaInst*> Allocas;
      Allocas.push_back(AI);
      PromoteMemToReg(Allocas, *DT, *DF, AA->getTargetData(), CurAST);
    }
  }
}

/// hoist - When an instruction is found to only use loop invariant operands
/// that is safe to hoist, this instruction is called to do the dirty work.
566/// 567void LICM::hoist(Instruction &I) { 568 DOUT << "LICM hoisting to " << Preheader->getName() << ": " << I; 569 570 // Remove the instruction from its current basic block... but don't delete the 571 // instruction. 572 I.removeFromParent(); 573 574 // Insert the new node in Preheader, before the terminator. 575 Preheader->getInstList().insert(Preheader->getTerminator(), &I); 576 577 if (isa<LoadInst>(I)) ++NumMovedLoads; 578 else if (isa<CallInst>(I)) ++NumMovedCalls; 579 ++NumHoisted; 580 Changed = true; 581} 582 583/// isSafeToExecuteUnconditionally - Only sink or hoist an instruction if it is 584/// not a trapping instruction or if it is a trapping instruction and is 585/// guaranteed to execute. 586/// 587bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) { 588 // If it is not a trapping instruction, it is always safe to hoist. 589 if (!Inst.isTrapping()) return true; 590 591 // Otherwise we have to check to make sure that the instruction dominates all 592 // of the exit blocks. If it doesn't, then there is a path out of the loop 593 // which does not execute this instruction, so we can't hoist it. 594 595 // If the instruction is in the header block for the loop (which is very 596 // common), it is always guaranteed to dominate the exit blocks. Since this 597 // is a common case, and can save some work, check it now. 598 if (Inst.getParent() == CurLoop->getHeader()) 599 return true; 600 601 // It's always safe to load from a global or alloca. 602 if (isa<LoadInst>(Inst)) 603 if (isa<AllocationInst>(Inst.getOperand(0)) || 604 isa<GlobalVariable>(Inst.getOperand(0))) 605 return true; 606 607 // Get the exit blocks for the current loop. 608 std::vector<BasicBlock*> ExitBlocks; 609 CurLoop->getExitBlocks(ExitBlocks); 610 611 // For each exit block, get the DT node and walk up the DT until the 612 // instruction's basic block is found or we exit the loop. 
613 for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) 614 if (!isExitBlockDominatedByBlockInLoop(ExitBlocks[i], Inst.getParent())) 615 return false; 616 617 return true; 618} 619 620 621/// PromoteValuesInLoop - Try to promote memory values to scalars by sinking 622/// stores out of the loop and moving loads to before the loop. We do this by 623/// looping over the stores in the loop, looking for stores to Must pointers 624/// which are loop invariant. We promote these memory locations to use allocas 625/// instead. These allocas can easily be raised to register values by the 626/// PromoteMem2Reg functionality. 627/// 628void LICM::PromoteValuesInLoop() { 629 // PromotedValues - List of values that are promoted out of the loop. Each 630 // value has an alloca instruction for it, and a canonical version of the 631 // pointer. 632 std::vector<std::pair<AllocaInst*, Value*> > PromotedValues; 633 std::map<Value*, AllocaInst*> ValueToAllocaMap; // Map of ptr to alloca 634 635 FindPromotableValuesInLoop(PromotedValues, ValueToAllocaMap); 636 if (ValueToAllocaMap.empty()) return; // If there are values to promote. 637 638 Changed = true; 639 NumPromoted += PromotedValues.size(); 640 641 std::vector<Value*> PointerValueNumbers; 642 643 // Emit a copy from the value into the alloca'd value in the loop preheader 644 TerminatorInst *LoopPredInst = Preheader->getTerminator(); 645 for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i) { 646 Value *Ptr = PromotedValues[i].second; 647 648 // If we are promoting a pointer value, update alias information for the 649 // inserted load. 650 Value *LoadValue = 0; 651 if (isa<PointerType>(cast<PointerType>(Ptr->getType())->getElementType())) { 652 // Locate a load or store through the pointer, and assign the same value 653 // to LI as we are loading or storing. Since we know that the value is 654 // stored in this loop, this will always succeed. 
655 for (Value::use_iterator UI = Ptr->use_begin(), E = Ptr->use_end(); 656 UI != E; ++UI) 657 if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) { 658 LoadValue = LI; 659 break; 660 } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) { 661 if (SI->getOperand(1) == Ptr) { 662 LoadValue = SI->getOperand(0); 663 break; 664 } 665 } 666 assert(LoadValue && "No store through the pointer found!"); 667 PointerValueNumbers.push_back(LoadValue); // Remember this for later. 668 } 669 670 // Load from the memory we are promoting. 671 LoadInst *LI = new LoadInst(Ptr, Ptr->getName()+".promoted", LoopPredInst); 672 673 if (LoadValue) CurAST->copyValue(LoadValue, LI); 674 675 // Store into the temporary alloca. 676 new StoreInst(LI, PromotedValues[i].first, LoopPredInst); 677 } 678 679 // Scan the basic blocks in the loop, replacing uses of our pointers with 680 // uses of the allocas in question. 681 // 682 const std::vector<BasicBlock*> &LoopBBs = CurLoop->getBlocks(); 683 for (std::vector<BasicBlock*>::const_iterator I = LoopBBs.begin(), 684 E = LoopBBs.end(); I != E; ++I) { 685 // Rewrite all loads and stores in the block of the pointer... 686 for (BasicBlock::iterator II = (*I)->begin(), E = (*I)->end(); 687 II != E; ++II) { 688 if (LoadInst *L = dyn_cast<LoadInst>(II)) { 689 std::map<Value*, AllocaInst*>::iterator 690 I = ValueToAllocaMap.find(L->getOperand(0)); 691 if (I != ValueToAllocaMap.end()) 692 L->setOperand(0, I->second); // Rewrite load instruction... 693 } else if (StoreInst *S = dyn_cast<StoreInst>(II)) { 694 std::map<Value*, AllocaInst*>::iterator 695 I = ValueToAllocaMap.find(S->getOperand(1)); 696 if (I != ValueToAllocaMap.end()) 697 S->setOperand(1, I->second); // Rewrite store instruction... 698 } 699 } 700 } 701 702 // Now that the body of the loop uses the allocas instead of the original 703 // memory locations, insert code to copy the alloca value back into the 704 // original memory location on all exits from the loop. 
Note that we only 705 // want to insert one copy of the code in each exit block, though the loop may 706 // exit to the same block more than once. 707 // 708 std::set<BasicBlock*> ProcessedBlocks; 709 710 std::vector<BasicBlock*> ExitBlocks; 711 CurLoop->getExitBlocks(ExitBlocks); 712 for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) 713 if (ProcessedBlocks.insert(ExitBlocks[i]).second) { 714 // Copy all of the allocas into their memory locations. 715 BasicBlock::iterator BI = ExitBlocks[i]->begin(); 716 while (isa<PHINode>(*BI)) 717 ++BI; // Skip over all of the phi nodes in the block. 718 Instruction *InsertPos = BI; 719 unsigned PVN = 0; 720 for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i) { 721 // Load from the alloca. 722 LoadInst *LI = new LoadInst(PromotedValues[i].first, "", InsertPos); 723 724 // If this is a pointer type, update alias info appropriately. 725 if (isa<PointerType>(LI->getType())) 726 CurAST->copyValue(PointerValueNumbers[PVN++], LI); 727 728 // Store into the memory we promoted. 729 new StoreInst(LI, PromotedValues[i].second, InsertPos); 730 } 731 } 732 733 // Now that we have done the deed, use the mem2reg functionality to promote 734 // all of the new allocas we just created into real SSA registers. 735 // 736 std::vector<AllocaInst*> PromotedAllocas; 737 PromotedAllocas.reserve(PromotedValues.size()); 738 for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i) 739 PromotedAllocas.push_back(PromotedValues[i].first); 740 PromoteMemToReg(PromotedAllocas, *DT, *DF, AA->getTargetData(), CurAST); 741} 742 743/// FindPromotableValuesInLoop - Check the current loop for stores to definite 744/// pointers, which are not loaded and stored through may aliases. If these are 745/// found, create an alloca for the value, add it to the PromotedValues list, 746/// and keep track of the mapping from value to alloca. 
747/// 748void LICM::FindPromotableValuesInLoop( 749 std::vector<std::pair<AllocaInst*, Value*> > &PromotedValues, 750 std::map<Value*, AllocaInst*> &ValueToAllocaMap) { 751 Instruction *FnStart = CurLoop->getHeader()->getParent()->begin()->begin(); 752 753 // Loop over all of the alias sets in the tracker object. 754 for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end(); 755 I != E; ++I) { 756 AliasSet &AS = *I; 757 // We can promote this alias set if it has a store, if it is a "Must" alias 758 // set, if the pointer is loop invariant, and if we are not eliminating any 759 // volatile loads or stores. 760 if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias() && 761 !AS.isVolatile() && CurLoop->isLoopInvariant(AS.begin()->first)) { 762 assert(AS.begin() != AS.end() && 763 "Must alias set should have at least one pointer element in it!"); 764 Value *V = AS.begin()->first; 765 766 // Check that all of the pointers in the alias set have the same type. We 767 // cannot (yet) promote a memory location that is loaded and stored in 768 // different sizes. 769 bool PointerOk = true; 770 for (AliasSet::iterator I = AS.begin(), E = AS.end(); I != E; ++I) 771 if (V->getType() != I->first->getType()) { 772 PointerOk = false; 773 break; 774 } 775 776 if (PointerOk) { 777 const Type *Ty = cast<PointerType>(V->getType())->getElementType(); 778 AllocaInst *AI = new AllocaInst(Ty, 0, V->getName()+".tmp", FnStart); 779 PromotedValues.push_back(std::make_pair(AI, V)); 780 781 // Update the AST and alias analysis. 782 CurAST->copyValue(V, AI); 783 784 for (AliasSet::iterator I = AS.begin(), E = AS.end(); I != E; ++I) 785 ValueToAllocaMap.insert(std::make_pair(I->first, AI)); 786 787 DOUT << "LICM: Promoting value: " << *V << "\n"; 788 } 789 } 790 } 791} 792