LICM.cpp revision df07335b4648c4cc255343081fdf61319d90431d
//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible.  It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe.  This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops.  If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop.  This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer.  There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable.  We then use
//     the mem2reg functionality to construct the appropriate SSA form for the
//     variable.
//
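// For illustration only (a sketch with hypothetical IR names %n, %t and %c
// that do not appear elsewhere in this file): when %n is loop invariant and
// the multiply is safe to execute unconditionally, hoisting rewrites
//
//       loop:
//         %t = mul i32 %n, 4
//         ...uses of %t...
//         br i1 %c, label %loop, label %exit
//
// into
//
//       preheader:
//         %t = mul i32 %n, 4
//         br label %loop
//       loop:
//         ...uses of %t...
//         br i1 %c, label %loop, label %exit
//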
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "licm"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumSunk      , "Number of instructions sunk out of loop");
STATISTIC(NumHoisted   , "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted  , "Number of memory locations promoted to registers");

namespace {
  cl::opt<bool>
  DisablePromotion("disable-licm-promotion", cl::Hidden,
                   cl::desc("Disable memory promotion in LICM pass"));

  struct VISIBILITY_HIDDEN LICM : public LoopPass {
    virtual bool runOnLoop(Loop *L, LPPassManager &LPM);

    /// This transformation requires natural loop information & requires that
    /// loop preheaders be inserted into the CFG...
    ///
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<LoopInfo>();
      AU.addRequired<ETForest>();
      AU.addRequired<DominanceFrontier>();  // For scalar promotion (mem2reg)
      AU.addRequired<AliasAnalysis>();
    }

    bool doFinalization() {
      LoopToAliasMap.clear();
      return false;
    }

  private:
    // Various analyses that we use...
    AliasAnalysis *AA;       // Current AliasAnalysis information
    LoopInfo      *LI;       // Current LoopInfo
    ETForest      *ET;       // ETForest for the current Loop...
    DominanceFrontier *DF;   // Current Dominance Frontier

    // State that is updated as we process loops
    bool Changed;            // Set to true when we change anything.
    BasicBlock *Preheader;   // The preheader block of the current loop...
    Loop *CurLoop;           // The current loop we are working on...
    AliasSetTracker *CurAST; // AliasSet information for the current loop...
    std::map<Loop *, AliasSetTracker *> LoopToAliasMap;

    /// SinkRegion - Walk the specified region of the CFG (defined by all blocks
    /// dominated by the specified block, and that are in the current loop) in
    /// reverse depth first order w.r.t the ETForest.  This allows us to
    /// visit uses before definitions, allowing us to sink a loop body in one
    /// pass without iteration.
    ///
    void SinkRegion(BasicBlock *BB);

    /// HoistRegion - Walk the specified region of the CFG (defined by all
    /// blocks dominated by the specified block, and that are in the current
    /// loop) in depth first order w.r.t the ETForest.  This allows us to
    /// visit definitions before uses, allowing us to hoist a loop body in one
    /// pass without iteration.
    ///
    void HoistRegion(BasicBlock *BB);

    /// inSubLoop - Little predicate that returns true if the specified basic
    /// block is in a subloop of the current one, not the current one itself.
    ///
    bool inSubLoop(BasicBlock *BB) {
      assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
      for (Loop::iterator I = CurLoop->begin(), E = CurLoop->end(); I != E; ++I)
        if ((*I)->contains(BB))
          return true;  // A subloop actually contains this block!
      return false;
    }

    /// isExitBlockDominatedByBlockInLoop - This method checks to see if the
    /// specified exit block of the loop is dominated by the specified block
    /// that is in the body of the loop.  We use these constraints to
    /// dramatically limit the amount of the dominator tree that needs to be
    /// searched.
    bool isExitBlockDominatedByBlockInLoop(BasicBlock *ExitBlock,
                                           BasicBlock *BlockInLoop) const {
      // If the block in the loop is the loop header, it must be dominated!
      BasicBlock *LoopHeader = CurLoop->getHeader();
      if (BlockInLoop == LoopHeader)
        return true;

      BasicBlock *IDom = ExitBlock;

      // Because the exit block is not in the loop, we know we have to get _at
      // least_ its immediate dominator.
      do {
        // Get next Immediate Dominator.
        IDom = ET->getIDom(IDom);

        // If we have got to the header of the loop, then the instruction's
        // block did not dominate the exit node, so we can't hoist it.
        if (IDom == LoopHeader)
          return false;

      } while (IDom != BlockInLoop);

      return true;
    }

    /// sink - When an instruction is found to only be used outside of the loop,
    /// this function moves it to the exit blocks and patches up SSA form as
    /// needed.
    ///
    void sink(Instruction &I);

    /// hoist - When an instruction is found to only use loop invariant operands
    /// and it is safe to hoist, this function is called to do the dirty work.
    ///
    void hoist(Instruction &I);

    /// isSafeToExecuteUnconditionally - Only sink or hoist an instruction if it
    /// is not a trapping instruction or if it is a trapping instruction and is
    /// guaranteed to execute.
    ///
    bool isSafeToExecuteUnconditionally(Instruction &I);

    /// pointerInvalidatedByLoop - Return true if the body of this loop may
    /// store into the memory location pointed to by V.
    ///
    bool pointerInvalidatedByLoop(Value *V, unsigned Size) {
      // Check to see if any of the basic blocks in CurLoop invalidate *V.
      return CurAST->getAliasSetForPointer(V, Size).isMod();
    }

    bool canSinkOrHoistInst(Instruction &I);
    bool isLoopInvariantInst(Instruction &I);
    bool isNotUsedInLoop(Instruction &I);

    /// PromoteValuesInLoop - Look at the stores in the loop and promote as many
    /// to scalars as we can.
    ///
    void PromoteValuesInLoop();

    /// FindPromotableValuesInLoop - Check the current loop for stores to
    /// definite pointers, which are not loaded and stored through may-aliases.
    /// If these are found, create an alloca for the value, add it to the
    /// PromotedValues list, and keep track of the mapping from value to
    /// alloca...
    ///
    void FindPromotableValuesInLoop(
                   std::vector<std::pair<AllocaInst*, Value*> > &PromotedValues,
                                    std::map<Value*, AllocaInst*> &Val2AlMap);
  };

  RegisterPass<LICM> X("licm", "Loop Invariant Code Motion");
}

LoopPass *llvm::createLICMPass() { return new LICM(); }

/// Hoist expressions out of the specified loop...
///
bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
  Changed = false;

  // Get our Loop and Alias Analysis information...
  LI = &getAnalysis<LoopInfo>();
  AA = &getAnalysis<AliasAnalysis>();
  DF = &getAnalysis<DominanceFrontier>();
  ET = &getAnalysis<ETForest>();

  CurAST = new AliasSetTracker(*AA);
  // Collect alias info from subloops.
  for (Loop::iterator LoopItr = L->begin(), LoopItrE = L->end();
       LoopItr != LoopItrE; ++LoopItr) {
    Loop *InnerL = *LoopItr;
    AliasSetTracker *InnerAST = LoopToAliasMap[InnerL];
    assert(InnerAST && "Where is my AST?");

    // What if InnerLoop was modified by other passes?
    CurAST->add(*InnerAST);
  }

  CurLoop = L;

  // Get the preheader block to move instructions into...
  Preheader = L->getLoopPreheader();
  assert(Preheader&&"Preheader insertion pass guarantees we have a preheader!");

  // Loop over the body of this loop, looking for calls, invokes, and stores.
  // Because subloops have already been incorporated into AST, we skip blocks in
  // subloops.
  //
  for (std::vector<BasicBlock*>::const_iterator I = L->getBlocks().begin(),
         E = L->getBlocks().end(); I != E; ++I)
    if (LI->getLoopFor(*I) == L)        // Ignore blocks in subloops...
      CurAST->add(**I);                 // Incorporate the specified basic block

  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses.  This allows
  // us to sink instructions in one pass, without iteration.  After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  //
  SinkRegion(L->getHeader());
  HoistRegion(L->getHeader());

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can...
  if (!DisablePromotion)
    PromoteValuesInLoop();

  // Clear out the loop's state information for the next iteration.
  CurLoop = 0;
  Preheader = 0;

  LoopToAliasMap[L] = CurAST;
  return Changed;
}

/// SinkRegion - Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in
/// reverse depth first order w.r.t the ETForest.  This allows us to visit
/// uses before definitions, allowing us to sink a loop body in one pass without
/// iteration.
///
void LICM::SinkRegion(BasicBlock *BB) {
  assert(BB != 0 && "Null sink block?");

  // If this subregion is not in the top level loop at all, exit.
  if (!CurLoop->contains(BB)) return;

  // We are processing blocks in reverse dfo, so process children first...
  std::vector<BasicBlock*> Children;
  ET->getChildren(BB, Children);
  for (unsigned i = 0, e = Children.size(); i != e; ++i)
    SinkRegion(Children[i]);

  // Only need to process the contents of this block if it is not part of a
  // subloop (which would already have been processed).
  if (inSubLoop(BB)) return;

  for (BasicBlock::iterator II = BB->end(); II != BB->begin(); ) {
    Instruction &I = *--II;

    // Check to see if we can sink this instruction to the exit blocks
    // of the loop.  We can do this if all of the users of the instruction are
    // outside of the loop.  In this case, it doesn't even matter if the
    // operands of the instruction are loop invariant.
    //
    if (isNotUsedInLoop(I) && canSinkOrHoistInst(I)) {
      ++II;
      sink(I);
    }
  }
}


/// HoistRegion - Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in depth
/// first order w.r.t the DominatorTree.  This allows us to visit definitions
/// before uses, allowing us to hoist a loop body in one pass without iteration.
///
void LICM::HoistRegion(BasicBlock *BB) {
  assert(BB != 0 && "Null hoist block?");

  // If this subregion is not in the top level loop at all, exit.
  if (!CurLoop->contains(BB)) return;

  // Only need to process the contents of this block if it is not part of a
  // subloop (which would already have been processed).
  if (!inSubLoop(BB))
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ) {
      Instruction &I = *II++;

      // Try hoisting the instruction out to the preheader.  We can only do this
      // if all of the operands of the instruction are loop invariant and if it
      // is safe to hoist the instruction.
      //
      if (isLoopInvariantInst(I) && canSinkOrHoistInst(I) &&
          isSafeToExecuteUnconditionally(I))
        hoist(I);
    }

  std::vector<BasicBlock*> Children;
  ET->getChildren(BB, Children);
  for (unsigned i = 0, e = Children.size(); i != e; ++i)
    HoistRegion(Children[i]);
}

/// canSinkOrHoistInst - Return true if the hoister and sinker can handle this
/// instruction.
///
bool LICM::canSinkOrHoistInst(Instruction &I) {
  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (LI->isVolatile())
      return false;        // Don't hoist volatile loads!

    // Don't hoist loads which have may-aliased stores in loop.
    unsigned Size = 0;
    if (LI->getType()->isSized())
      Size = AA->getTargetData().getTypeSize(LI->getType());
    return !pointerInvalidatedByLoop(LI->getOperand(0), Size);
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Handle obvious cases efficiently.
    if (Function *Callee = CI->getCalledFunction()) {
      AliasAnalysis::ModRefBehavior Behavior =
        AA->getModRefBehavior(Callee, CI);
      if (Behavior == AliasAnalysis::DoesNotAccessMemory)
        return true;
      else if (Behavior == AliasAnalysis::OnlyReadsMemory) {
        // If this call only reads from memory and there are no writes to memory
        // in the loop, we can hoist or sink the call as appropriate.
        bool FoundMod = false;
        for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
             I != E; ++I) {
          AliasSet &AS = *I;
          if (!AS.isForwardingAliasSet() && AS.isMod()) {
            FoundMod = true;
            break;
          }
        }
        if (!FoundMod) return true;
      }
    }

    // FIXME: This should use mod/ref information to see if we can hoist or sink
    // the call.

    return false;
  }

  // Otherwise these instructions are hoistable/sinkable
  return isa<BinaryOperator>(I) || isa<CastInst>(I) ||
         isa<SelectInst>(I) || isa<GetElementPtrInst>(I) || isa<CmpInst>(I);
}

/// isNotUsedInLoop - Return true if the only users of this instruction are
/// outside of the loop.  If this is true, we can sink the instruction to the
/// exit blocks of the loop.
///
bool LICM::isNotUsedInLoop(Instruction &I) {
  for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      // PHI node uses occur in predecessor blocks!
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (PN->getIncomingValue(i) == &I)
          if (CurLoop->contains(PN->getIncomingBlock(i)))
            return false;
    } else if (CurLoop->contains(User->getParent())) {
      return false;
    }
  }
  return true;
}


/// isLoopInvariantInst - Return true if all operands of this instruction are
/// loop invariant.  We also filter out non-hoistable instructions here just for
/// efficiency.
///
bool LICM::isLoopInvariantInst(Instruction &I) {
  // The instruction is loop invariant if all of its operands are loop-invariant
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
    if (!CurLoop->isLoopInvariant(I.getOperand(i)))
      return false;

  // If we got this far, the instruction is loop invariant!
  return true;
}

/// sink - When an instruction is found to only be used outside of the loop,
/// this function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
void LICM::sink(Instruction &I) {
  DOUT << "LICM sinking instruction: " << I;

  std::vector<BasicBlock*> ExitBlocks;
  CurLoop->getExitBlocks(ExitBlocks);

  if (isa<LoadInst>(I)) ++NumMovedLoads;
  else if (isa<CallInst>(I)) ++NumMovedCalls;
  ++NumSunk;
  Changed = true;

  // The case where there is only a single exit node of this loop is common
  // enough that we handle it as a special (more efficient) case.  It is more
  // efficient to handle because there are no PHI nodes that need to be placed.
  if (ExitBlocks.size() == 1) {
    if (!isExitBlockDominatedByBlockInLoop(ExitBlocks[0], I.getParent())) {
      // Instruction is not used, just delete it.
      CurAST->deleteValue(&I);
      if (!I.use_empty())  // If I has users in unreachable blocks, eliminate.
        I.replaceAllUsesWith(UndefValue::get(I.getType()));
      I.eraseFromParent();
    } else {
      // Move the instruction to the start of the exit block, after any PHI
      // nodes in it.
      I.removeFromParent();

      BasicBlock::iterator InsertPt = ExitBlocks[0]->begin();
      while (isa<PHINode>(InsertPt)) ++InsertPt;
      ExitBlocks[0]->getInstList().insert(InsertPt, &I);
    }
  } else if (ExitBlocks.size() == 0) {
    // The instruction is actually dead if there ARE NO exit blocks.
    CurAST->deleteValue(&I);
    if (!I.use_empty())  // If I has users in unreachable blocks, eliminate.
      I.replaceAllUsesWith(UndefValue::get(I.getType()));
    I.eraseFromParent();
  } else {
    // Otherwise, if we have multiple exits, use the PromoteMem2Reg function to
    // do all of the hard work of inserting PHI nodes as necessary.  We convert
    // the value into a stack object to get it to do this.
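    //
    // Illustrative sketch with hypothetical names %v and %v.addr (used only in
    // this comment): for a value %v defined in the loop but used only outside
    // of it, we create "%v.addr = alloca" in the function entry block, turn
    // every use of %v outside the loop into a load from %v.addr, place a copy
    // of %v (followed by a store into %v.addr) in each exit block dominated by
    // %v's original block, and finally let PromoteMemToReg rebuild SSA form,
    // adding whatever PHI nodes are needed.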

    // Firstly, we create a stack object to hold the value...
    AllocaInst *AI = 0;

    if (I.getType() != Type::VoidTy)
      AI = new AllocaInst(I.getType(), 0, I.getName(),
                          I.getParent()->getParent()->getEntryBlock().begin());

    // Secondly, insert load instructions for each use of the instruction
    // outside of the loop.
    while (!I.use_empty()) {
      Instruction *U = cast<Instruction>(I.use_back());

      // If the user is a PHI Node, we actually have to insert load instructions
      // in all predecessor blocks, not in the PHI block itself!
      if (PHINode *UPN = dyn_cast<PHINode>(U)) {
        // Only insert into each predecessor once, so that we don't have
        // different incoming values from the same block!
        std::map<BasicBlock*, Value*> InsertedBlocks;
        for (unsigned i = 0, e = UPN->getNumIncomingValues(); i != e; ++i)
          if (UPN->getIncomingValue(i) == &I) {
            BasicBlock *Pred = UPN->getIncomingBlock(i);
            Value *&PredVal = InsertedBlocks[Pred];
            if (!PredVal) {
              // Insert a new load instruction right before the terminator in
              // the predecessor block.
              PredVal = new LoadInst(AI, "", Pred->getTerminator());
            }

            UPN->setIncomingValue(i, PredVal);
          }

      } else {
        LoadInst *L = new LoadInst(AI, "", U);
        U->replaceUsesOfWith(&I, L);
      }
    }

    // Thirdly, insert a copy of the instruction in each exit block of the loop
    // that is dominated by the instruction, storing the result into the memory
    // location.  Be careful not to insert the instruction into any particular
    // basic block more than once.
    std::set<BasicBlock*> InsertedBlocks;
    BasicBlock *InstOrigBB = I.getParent();

    for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = ExitBlocks[i];

      if (isExitBlockDominatedByBlockInLoop(ExitBlock, InstOrigBB)) {
        // If we haven't already processed this exit block, do so now.
        if (InsertedBlocks.insert(ExitBlock).second) {
          // Insert the code after the last PHI node...
          BasicBlock::iterator InsertPt = ExitBlock->begin();
          while (isa<PHINode>(InsertPt)) ++InsertPt;

          // If this is the first exit block processed, just move the original
          // instruction, otherwise clone the original instruction and insert
          // the copy.
          Instruction *New;
          if (InsertedBlocks.size() == 1) {
            I.removeFromParent();
            ExitBlock->getInstList().insert(InsertPt, &I);
            New = &I;
          } else {
            New = I.clone();
            CurAST->copyValue(&I, New);
            if (!I.getName().empty())
              New->setName(I.getName()+".le");
            ExitBlock->getInstList().insert(InsertPt, New);
          }

          // Now that we have inserted the instruction, store it into the alloca
          if (AI) new StoreInst(New, AI, InsertPt);
        }
      }
    }

    // If the instruction doesn't dominate any exit blocks, it must be dead.
    if (InsertedBlocks.empty()) {
      CurAST->deleteValue(&I);
      I.eraseFromParent();
    }

    // Finally, promote the temporary alloca to SSA form.
    if (AI) {
      std::vector<AllocaInst*> Allocas;
      Allocas.push_back(AI);
      PromoteMemToReg(Allocas, *ET, *DF, AA->getTargetData(), CurAST);
    }
  }
}

/// hoist - When an instruction is found to only use loop invariant operands
/// and it is safe to hoist, this function is called to do the dirty work.
///
void LICM::hoist(Instruction &I) {
  DOUT << "LICM hoisting to " << Preheader->getName() << ": " << I;

  // Remove the instruction from its current basic block... but don't delete the
  // instruction.
  I.removeFromParent();

  // Insert the new node in Preheader, before the terminator.
  Preheader->getInstList().insert(Preheader->getTerminator(), &I);

  if (isa<LoadInst>(I)) ++NumMovedLoads;
  else if (isa<CallInst>(I)) ++NumMovedCalls;
  ++NumHoisted;
  Changed = true;
}

/// isSafeToExecuteUnconditionally - Only sink or hoist an instruction if it is
/// not a trapping instruction or if it is a trapping instruction and is
/// guaranteed to execute.
///
bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) {
  // If it is not a trapping instruction, it is always safe to hoist.
  if (!Inst.isTrapping()) return true;

  // Otherwise we have to check to make sure that the instruction dominates all
  // of the exit blocks.  If it doesn't, then there is a path out of the loop
  // which does not execute this instruction, so we can't hoist it.

  // If the instruction is in the header block for the loop (which is very
  // common), it is always guaranteed to dominate the exit blocks.  Since this
  // is a common case, and can save some work, check it now.
  if (Inst.getParent() == CurLoop->getHeader())
    return true;

  // It's always safe to load from a global or alloca.
  if (isa<LoadInst>(Inst))
    if (isa<AllocationInst>(Inst.getOperand(0)) ||
        isa<GlobalVariable>(Inst.getOperand(0)))
      return true;

  // Get the exit blocks for the current loop.
  std::vector<BasicBlock*> ExitBlocks;
  CurLoop->getExitBlocks(ExitBlocks);

  // For each exit block, walk up the ET until the
  // instruction's basic block is found or we exit the loop.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!isExitBlockDominatedByBlockInLoop(ExitBlocks[i], Inst.getParent()))
      return false;

  return true;
}


/// PromoteValuesInLoop - Try to promote memory values to scalars by sinking
/// stores out of the loop and moving loads to before the loop.  We do this by
/// looping over the stores in the loop, looking for stores to Must pointers
/// which are loop invariant.  We promote these memory locations to use allocas
/// instead.  These allocas can easily be raised to register values by the
/// PromoteMem2Reg functionality.
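///
/// For illustration only (hypothetical IR; assume %P is loop invariant and its
/// alias set is a must-alias set with no volatile accesses), a loop such as
///
///       loop:
///         %x = load i32* %P
///         %y = add i32 %x, 1
///         store i32 %y, i32* %P
///         br i1 %c, label %loop, label %exit
///
/// is rewritten to keep the running value in a temporary alloca (and, after
/// mem2reg, in a register), with one load from %P in the preheader and one
/// store back to %P in each exit block.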
///
void LICM::PromoteValuesInLoop() {
  // PromotedValues - List of values that are promoted out of the loop.  Each
  // value has an alloca instruction for it, and a canonical version of the
  // pointer.
  std::vector<std::pair<AllocaInst*, Value*> > PromotedValues;
  std::map<Value*, AllocaInst*> ValueToAllocaMap; // Map of ptr to alloca

  FindPromotableValuesInLoop(PromotedValues, ValueToAllocaMap);
  if (ValueToAllocaMap.empty()) return;   // Nothing to promote.

  Changed = true;
  NumPromoted += PromotedValues.size();

  std::vector<Value*> PointerValueNumbers;

  // Emit a copy from the value into the alloca'd value in the loop preheader
  TerminatorInst *LoopPredInst = Preheader->getTerminator();
  for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i) {
    Value *Ptr = PromotedValues[i].second;

    // If we are promoting a pointer value, update alias information for the
    // inserted load.
    Value *LoadValue = 0;
    if (isa<PointerType>(cast<PointerType>(Ptr->getType())->getElementType())) {
      // Locate a load or store through the pointer, and assign the same value
      // to LI as we are loading or storing.  Since we know that the value is
      // stored in this loop, this will always succeed.
      for (Value::use_iterator UI = Ptr->use_begin(), E = Ptr->use_end();
           UI != E; ++UI)
        if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
          LoadValue = LI;
          break;
        } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
          if (SI->getOperand(1) == Ptr) {
            LoadValue = SI->getOperand(0);
            break;
          }
        }
      assert(LoadValue && "No store through the pointer found!");
      PointerValueNumbers.push_back(LoadValue);  // Remember this for later.
    }

    // Load from the memory we are promoting.
    LoadInst *LI = new LoadInst(Ptr, Ptr->getName()+".promoted", LoopPredInst);

    if (LoadValue) CurAST->copyValue(LoadValue, LI);

    // Store into the temporary alloca.
    new StoreInst(LI, PromotedValues[i].first, LoopPredInst);
  }

  // Scan the basic blocks in the loop, replacing uses of our pointers with
  // uses of the allocas in question.
  //
  const std::vector<BasicBlock*> &LoopBBs = CurLoop->getBlocks();
  for (std::vector<BasicBlock*>::const_iterator I = LoopBBs.begin(),
         E = LoopBBs.end(); I != E; ++I) {
    // Rewrite all loads and stores through the pointer in this block...
    for (BasicBlock::iterator II = (*I)->begin(), E = (*I)->end();
         II != E; ++II) {
      if (LoadInst *L = dyn_cast<LoadInst>(II)) {
        std::map<Value*, AllocaInst*>::iterator
          I = ValueToAllocaMap.find(L->getOperand(0));
        if (I != ValueToAllocaMap.end())
          L->setOperand(0, I->second);    // Rewrite load instruction...
      } else if (StoreInst *S = dyn_cast<StoreInst>(II)) {
        std::map<Value*, AllocaInst*>::iterator
          I = ValueToAllocaMap.find(S->getOperand(1));
        if (I != ValueToAllocaMap.end())
          S->setOperand(1, I->second);    // Rewrite store instruction...
      }
    }
  }

  // Now that the body of the loop uses the allocas instead of the original
  // memory locations, insert code to copy the alloca value back into the
  // original memory location on all exits from the loop.  Note that we only
  // want to insert one copy of the code in each exit block, though the loop may
  // exit to the same block more than once.
  //
  std::set<BasicBlock*> ProcessedBlocks;

  std::vector<BasicBlock*> ExitBlocks;
  CurLoop->getExitBlocks(ExitBlocks);
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (ProcessedBlocks.insert(ExitBlocks[i]).second) {
      // Copy all of the allocas into their memory locations.
      BasicBlock::iterator BI = ExitBlocks[i]->begin();
      while (isa<PHINode>(*BI))
        ++BI;             // Skip over all of the phi nodes in the block.
      Instruction *InsertPos = BI;
      unsigned PVN = 0;
      for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i) {
        // Load from the alloca.
        LoadInst *LI = new LoadInst(PromotedValues[i].first, "", InsertPos);

        // If this is a pointer type, update alias info appropriately.
        if (isa<PointerType>(LI->getType()))
          CurAST->copyValue(PointerValueNumbers[PVN++], LI);

        // Store into the memory we promoted.
        new StoreInst(LI, PromotedValues[i].second, InsertPos);
      }
    }

  // Now that we have done the deed, use the mem2reg functionality to promote
  // all of the new allocas we just created into real SSA registers.
  //
  std::vector<AllocaInst*> PromotedAllocas;
  PromotedAllocas.reserve(PromotedValues.size());
  for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i)
    PromotedAllocas.push_back(PromotedValues[i].first);
  PromoteMemToReg(PromotedAllocas, *ET, *DF, AA->getTargetData(), CurAST);
}

/// FindPromotableValuesInLoop - Check the current loop for stores to definite
/// pointers, which are not loaded and stored through may-aliases.  If these are
/// found, create an alloca for the value, add it to the PromotedValues list,
/// and keep track of the mapping from value to alloca.
///
void LICM::FindPromotableValuesInLoop(
                   std::vector<std::pair<AllocaInst*, Value*> > &PromotedValues,
                             std::map<Value*, AllocaInst*> &ValueToAllocaMap) {
  Instruction *FnStart = CurLoop->getHeader()->getParent()->begin()->begin();

  // Loop over all of the alias sets in the tracker object.
  for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
       I != E; ++I) {
    AliasSet &AS = *I;
    // We can promote this alias set if it has a store, if it is a "Must" alias
    // set, if the pointer is loop invariant, and if we are not eliminating any
    // volatile loads or stores.
    if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias() &&
        !AS.isVolatile() && CurLoop->isLoopInvariant(AS.begin()->first)) {
      assert(AS.begin() != AS.end() &&
             "Must alias set should have at least one pointer element in it!");
      Value *V = AS.begin()->first;

      // Check that all of the pointers in the alias set have the same type.  We
      // cannot (yet) promote a memory location that is loaded and stored in
      // different sizes.
      bool PointerOk = true;
      for (AliasSet::iterator I = AS.begin(), E = AS.end(); I != E; ++I)
        if (V->getType() != I->first->getType()) {
          PointerOk = false;
          break;
        }

      if (PointerOk) {
        const Type *Ty = cast<PointerType>(V->getType())->getElementType();
        AllocaInst *AI = new AllocaInst(Ty, 0, V->getName()+".tmp", FnStart);
        PromotedValues.push_back(std::make_pair(AI, V));

        // Update the AST and alias analysis.
        CurAST->copyValue(V, AI);

        for (AliasSet::iterator I = AS.begin(), E = AS.end(); I != E; ++I)
          ValueToAllocaMap.insert(std::make_pair(I->first, AI));

        DOUT << "LICM: Promoting value: " << *V << "\n";
      }
    }
  }
}