CodeGenPrepare.cpp revision 6b980541df5846ad335c377c8803b517968daee2
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/ValueHandle.h"
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim,   "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim,   "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved,  "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses,    "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup,    "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");

static cl::opt<bool> DisableBranchOpts(
  "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
  cl::desc("Disable branch optimizations in CodeGenPrepare"));

// FIXME: Remove this abomination once all of the tests pass without it!
static cl::opt<bool> DisableDeleteDeadBlocks(
  "disable-cgp-delete-dead-blocks", cl::Hidden, cl::init(false),
  cl::desc("Disable deleting dead blocks in CodeGenPrepare"));

namespace {
  class CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer to the TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;
    const TargetLibraryInfo *TLInfo;
    DominatorTree *DT;
    ProfileInfo *PFI;

    /// CurInstIterator - As we scan instructions optimizing them, this is the
    /// next instruction to optimize.  Xforms that can invalidate this should
    /// update it.
    BasicBlock::iterator CurInstIterator;

    /// Keeps track of non-local addresses that have been sunk into a block.
    /// This allows us to avoid inserting duplicate code for blocks with
    /// multiple load/stores of the same address.
    DenseMap<Value*, Value*> SunkAddrs;

    /// ModifiedDT - If the CFG is modified in any way, the dominator tree may
    /// need to be updated.
    bool ModifiedDT;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetLowering *tli = 0)
      : FunctionPass(ID), TLI(tli) {
        initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
      }
    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<ProfileInfo>();
      AU.addRequired<TargetLibraryInfo>();
    }

  private:
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeInst(Instruction *I);
    bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy);
    bool OptimizeInlineAsmInst(CallInst *CS);
    bool OptimizeCallInst(CallInst *CI);
    bool MoveExtToFormExtLoad(Instruction *I);
    bool OptimizeExtUses(Instruction *I);
    bool DupRetToEnableTailCallOpts(ReturnInst *RI);
    bool PlaceDbgValues(Function &F);
  };
}
125
126char CodeGenPrepare::ID = 0;
127INITIALIZE_PASS_BEGIN(CodeGenPrepare, "codegenprepare",
128                "Optimize for code generation", false, false)
129INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
130INITIALIZE_PASS_END(CodeGenPrepare, "codegenprepare",
131                "Optimize for code generation", false, false)
132
133FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
134  return new CodeGenPrepare(TLI);
135}
136
bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  ModifiedDT = false;
  TLInfo = &getAnalysis<TargetLibraryInfo>();
  DT = getAnalysisIfAvailable<DominatorTree>();
  PFI = getAnalysisIfAvailable<ProfileInfo>();

  // First pass, eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then isel may not be able to
  // handle it properly. isel will drop llvm.dbg.value if it cannot find a node
  // corresponding to the value.
  EverMadeChange |= PlaceDbgValues(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
      BasicBlock *BB = I++;
      MadeChange |= OptimizeBlock(*BB);
    }
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
      MadeChange |= ConstantFoldTerminator(BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    if (!DisableDeleteDeadBlocks)
      for (SmallPtrSet<BasicBlock*, 8>::iterator
             I = WorkList.begin(), E = WorkList.end(); I != E; ++I)
        DeleteDeadBlock(*I);

    if (MadeChange)
      ModifiedDT = true;
    EverMadeChange |= MadeChange;
  }

  if (ModifiedDT && DT)
    DT->DT->recalculate(F);

  return EverMadeChange;
}

/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch.  Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel.  Start by eliminating these blocks so we can split edges the way we
/// want them split.
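///
/// For example (an illustrative sketch, not from a particular test case):
///
///   bb:                                   ; preds = %entry
///     %p = phi i32 [ 0, %entry ]
///     br label %exit
///   exit:
///     %q = phi i32 [ %p, %bb ], [ 1, %other ]
///
/// %bb does no real work, so it can be folded into %exit, with %q rewritten
/// to take [ 0, %entry ] directly.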
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
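///
/// Merging is rejected when it would create a phi conflict; e.g. (an
/// illustrative sketch):
///
///   bb:                                   ; preds = %p
///     %a = phi i32 [ 0, %p ]
///     br label %dest
///   dest:                                 ; preds = %p, %bb
///     %x = phi i32 [ 1, %p ], [ %a, %bb ]
///
/// Folding %bb into %dest would require %x to carry both 0 and 1 for the
/// edge from %p.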
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is a PHINode in DestBB, check its incoming values; if an
      // incoming value is not from BB, then this is a complex condition (e.g.
      // a preheader) we want to avoid here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phis and
/// an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
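  //
  // An illustrative sketch: if DestBB has "%x = phi i32 [ %v, %bb ]" and %v is
  // itself "%v = phi i32 [ %a, %p1 ], [ %b, %p2 ]" in BB, then %x becomes
  // "%x = phi i32 [ %a, %p1 ], [ %b, %p2 ]".  If %v instead dominates BB, one
  // [ %v, <pred> ] entry is added for each predecessor of BB.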
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (DT && !ModifiedDT) {
    BasicBlock *BBIDom  = DT->getNode(BB)->getIDom()->getBlock();
    BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock();
    BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom);
    DT->changeImmediateDominator(DestBB, NewIDom);
    DT->eraseNode(BB);
  }
  if (PFI) {
    PFI->replaceAllUses(BB, DestBB);
    PFI->removeEdge(ProfileInfo::getEdge(BB, DestBB));
  }
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
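/// A sketch of the transformation on illustrative IR (assuming the trunc is a
/// noop copy on the target):
///
///   bb0:
///     %c = trunc i64 %x to i32
///     br label %bb1
///   bb1:
///     %u = add i32 %c, 1
///
/// becomes
///
///   bb1:
///     %c1 = trunc i64 %x to i32
///     %u = add i32 %c1, 1
///
/// so no cross-block virtual register is needed for %c.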
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // Determine whether this is a noop copy.
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // An fp<->int conversion is never a noop copy.
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced.  This
/// is a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
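///
/// A minimal sketch (illustrative IR):
///
///   bb0:
///     %cmp = icmp eq i32 %a, %b
///     br label %bb1
///   bb1:
///     br i1 %cmp, label %t, label %f
///
/// The cmp is re-created in %bb1 next to its use, so the i1 value no longer
/// has to be carried across the block boundary.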
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmps - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
  void replaceCall(Value *With) {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const {
    if (ConstantInt *SizeCI =
          dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
      return SizeCI->isAllOnesValue();
    return false;
  }
};
} // end anonymous namespace

bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator.  Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);

    replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getTargetData() : 0,
                                  TLInfo, ModifiedDT ? 0 : DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }

  if (II && TLI) {
    SmallVector<Value*, 2> PtrOps;
    Type *AccessTy;
    if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy))
      while (!PtrOps.empty())
        if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy))
          return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need TargetData from here on out.
  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD);
}

/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
///
/// =>
///
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
///
bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
  if (!TLI)
    return false;

  Value *V = RI->getReturnValue();
  PHINode *PN = V ? dyn_cast<PHINode>(V) : NULL;
  if (V && !PN)
    return false;

  BasicBlock *BB = RI->getParent();
  if (PN && PN->getParent() != BB)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  // See llvm::isInTailCallPosition().
  const Function *F = BB->getParent();
  Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Make sure there are no instructions between the PHI and return, or that
  // the return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI != RI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RI)
      return false;
  }

  // Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  // call.
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI))
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    Attributes CalleeRetAttr = CS.getAttributes().getRetAttributes();
    if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

/// IsNonLocalValue - Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// OptimizeMemoryInst - Load and Store Instructions often have
/// addressing modes that can do significant amounts of computation.  As such,
/// instruction selection will try to get the load or store to do as much
/// computation as possible for the program.  The problem is that isel can only
/// see within a single block.  As such, we sink as much legal addressing mode
/// stuff into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
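///
/// For example (an illustrative sketch), given
///
///   bb0:
///     %addr = getelementptr inbounds i32* %base, i64 %idx
///     br label %bb1
///   bb1:
///     %v = load i32* %addr
///
/// the address computation is re-emitted in %bb1 next to the load, so isel
/// can fold it into the load's addressing mode.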
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes.  This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value*, 8> worklist;
  SmallPtrSet<Value*, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI nodes, and ensure that
  // the addressing modes obtained from the non-PHI roots of the graph
  // are equivalent.
  Value *Consensus = 0;
  unsigned NumUsesConsensus = 0;
  bool IsNumUsesConsensusValid = false;
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode;
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // Break use-def graph loops.
    if (!Visited.insert(V)) {
      Consensus = 0;
      break;
    }

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
        worklist.push_back(P->getIncomingValue(i));
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed.
    SmallVector<Instruction*, 16> NewAddrModeInsts;
    ExtAddrMode NewAddrMode =
      AddressingModeMatcher::Match(V, AccessTy, MemoryInst,
                                   NewAddrModeInsts, *TLI);

    // This check is broken into two cases with very similar code to avoid using
    // getNumUses() as much as possible. Some values have a lot of uses, so
    // calling getNumUses() unconditionally caused a significant compile-time
    // regression.
    if (!Consensus) {
      Consensus = V;
      AddrMode = NewAddrMode;
      AddrModeInsts = NewAddrModeInsts;
      continue;
    } else if (NewAddrMode == AddrMode) {
      if (!IsNumUsesConsensusValid) {
        NumUsesConsensus = Consensus->getNumUses();
        IsNumUsesConsensusValid = true;
      }

      // Ensure that the obtained addressing mode is equivalent to that obtained
      // for all other roots of the PHI traversal.  Also, when choosing one
      // such root as representative, select the one with the most uses in order
      // to keep the cost modeling heuristics in AddressingModeMatcher
      // applicable.
      unsigned NumUses = V->getNumUses();
      if (NumUses > NumUsesConsensus) {
        Consensus = V;
        NumUsesConsensus = NumUses;
        AddrModeInsts = NewAddrModeInsts;
      }
      continue;
    }

    Consensus = 0;
    break;
  }

  // If the addressing mode couldn't be determined, or if multiple different
  // ones were determined, bail out now.
  if (!Consensus) return false;

  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
  bool AnyNonLocal = false;
  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
  }

  // If all the instructions matched are already in this BB, don't do anything.
  if (!AnyNonLocal) {
    DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode << "\n");
    return false;
  }

  // Insert this computation right after this user.  Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expr are
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block.  If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
  } else {
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
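    // The code below re-materializes the matched address as integer
    // arithmetic.  An illustrative result for "base + 4*idx + 16" (the value
    // names are a sketch of the "sunkaddr" temporaries created below):
    //
    //   %sunkaddr  = ptrtoint i32* %base to i64
    //   %sunkaddr1 = mul i64 %idx, 4
    //   %sunkaddr2 = add i64 %sunkaddr, %sunkaddr1
    //   %sunkaddr3 = add i64 %sunkaddr2, 16
    //   %sunkaddr4 = inttoptr i64 %sunkaddr3 to i32*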
    Type *IntPtrTy =
          TLI->getTargetData()->getIntPtrType(AccessTy->getContext());

    Value *Result = 0;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        V = Builder.CreateSExt(V, IntPtrTy, "sunkaddr");
      }
      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (Result == 0)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }

  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    // This can cause recursive deletion, which can invalidate our iterator.
    // Use a WeakVH to hold onto it in case this happens.
    WeakVH IterHandle(CurInstIterator);
    BasicBlock *BB = CurInstIterator->getParent();

    RecursivelyDeleteTriviallyDeadInstructions(Repl);

    if (IterHandle != CurInstIterator) {
      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    } else {
      // This address is now available for reassignment, so erase the table
      // entry; we don't want to match some completely different instruction.
      SunkAddrs[Addr] = 0;
    }
  }
  ++NumMemoryInsts;
  return true;
}

/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computing into the block when
/// possible / profitable.
bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  TargetLowering::AsmOperandInfoVector
    TargetConstraints = TLI->ParseConstraints(CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType());
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
/// basic block as the load, unless conditions are unfavorable. This allows
/// SelectionDAG to fold the extend into the load.
///
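/// A sketch on illustrative IR:
///
///   bb0:
///     %x = load i16* %p
///     br label %bb1
///   bb1:
///     %y = sext i16 %x to i32
///
/// becomes, after moving the ext next to the load,
///
///   bb0:
///     %x = load i16* %p
///     %y = sext i16 %x to i32
///
/// which isel can match as a single extending load.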
bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
  // Look for a load being extended.
  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI) return false;

  // If they're already in the same block, there's nothing to do.
  if (LI->getParent() == I->getParent())
    return false;

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() &&
      TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) ||
              !TLI->isTypeLegal(TLI->getValueType(I->getType()))) &&
      !TLI->isTruncateFree(I->getType(), LI->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(I))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(I) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }
  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
    return false;

  // Move the extend into the same block as the load, so that SelectionDAG
  // can fold it.
  I->removeFromParent();
  I->insertAfter(LI);
  ++NumExtsMoved;
  return true;
}

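/// OptimizeExtUses - When both a {s|z}ext and its source value are live out of
/// the defining block, rewrite cross-block uses of the source in terms of the
/// extended value plus a local trunc (which the target reports as free), so
/// that only one of the two values has to be live across the block boundary.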
bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with the result of the extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes or memory instructions.
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    TheUse = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}

bool CodeGenPrepare::OptimizeInst(Instruction *I) {
  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up.  If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P)) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

1109    // If the source of the cast is a constant, then this should have
1110    // already been constant folded.  The only reason NOT to constant fold
1111    // it is if something (e.g. LSR) was careful to place the constant
1112    // evaluation in a block other than then one that uses it (e.g. to hoist
1113    // the address of globals out of a loop).  If this is the case, we don't
1114    // want to forward-subst the cast.
1115    if (isa<Constant>(CI->getOperand(0)))
1116      return false;
1117
1118    if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
1119      return true;
1120
1121    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
1122      bool MadeChange = MoveExtToFormExtLoad(I);
1123      return MadeChange | OptimizeExtUses(I);
1124    }
1125    return false;
1126  }
1127
  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return OptimizeCmpExpression(CI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType());
    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so must its result -> BitCast
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      OptimizeInst(NC);
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return OptimizeCallInst(CI);

  if (ReturnInst *RI = dyn_cast<ReturnInst>(I))
    return DupRetToEnableTailCallOpts(RI);

  return false;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  for (BasicBlock::iterator E = BB.end(); CurInstIterator != E; )
    MadeChange |= OptimizeInst(CurInstIterator++);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value, then isel may not be able to
// handle it properly. isel will drop llvm.dbg.value if it cannot find a node
// corresponding to the value.
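//
// A sketch (illustrative): if a block contains
//
//   %a = add i32 %x, %y
//   ... many unrelated instructions ...
//   call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !var)
//
// the dbg.value call is moved up to immediately follow the definition of %a.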
bool CodeGenPrepare::PlaceDbgValues(Function &F) {
  bool MadeChange = false;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
    Instruction *PrevNonDbgInst = NULL;
    for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
      Instruction *Insn = BI; ++BI;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      if (!DVI) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}