//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/DominatorInternals.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim,   "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim,   "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved,  "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses,    "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup,    "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");

static cl::opt<bool> DisableBranchOpts(
  "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
  cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
  "disable-cgp-select2branch", cl::Hidden, cl::init(false),
  cl::desc("Disable select to branch conversion."));

namespace {
  class CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;
    const TargetLibraryInfo *TLInfo;
    DominatorTree *DT;
    ProfileInfo *PFI;

    /// CurInstIterator - As we scan instructions optimizing them, this is the
    /// next instruction to optimize.  Xforms that can invalidate this should
    /// update it.
    BasicBlock::iterator CurInstIterator;

    /// Keeps track of non-local addresses that have been sunk into a block.
    /// This allows us to avoid inserting duplicate code for blocks with
    /// multiple load/stores of the same address.
    DenseMap<Value*, Value*> SunkAddrs;

    /// ModifiedDT - If the CFG is modified in any way, the dominator tree may
    /// need to be updated.
    bool ModifiedDT;

    /// OptSize - True if optimizing for size.
    bool OptSize;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetLowering *tli = 0)
      : FunctionPass(ID), TLI(tli) {
        initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
      }
    bool runOnFunction(Function &F);

    const char *getPassName() const { return "CodeGen Prepare"; }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<ProfileInfo>();
      AU.addRequired<TargetLibraryInfo>();
    }

  private:
    bool EliminateFallThrough(Function &F);
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeInst(Instruction *I);
    bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy);
    bool OptimizeInlineAsmInst(CallInst *CS);
    bool OptimizeCallInst(CallInst *CI);
    bool MoveExtToFormExtLoad(Instruction *I);
    bool OptimizeExtUses(Instruction *I);
    bool OptimizeSelectInst(SelectInst *SI);
    bool DupRetToEnableTailCallOpts(BasicBlock *BB);
    bool PlaceDbgValues(Function &F);
  };
}

char CodeGenPrepare::ID = 0;
INITIALIZE_PASS_BEGIN(CodeGenPrepare, "codegenprepare",
                "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(CodeGenPrepare, "codegenprepare",
                "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
  return new CodeGenPrepare(TLI);
}

bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  ModifiedDT = false;
  TLInfo = &getAnalysis<TargetLibraryInfo>();
  DT = getAnalysisIfAvailable<DominatorTree>();
  PFI = getAnalysisIfAvailable<ProfileInfo>();
  OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::OptimizeForSize);

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
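  /// For illustration (hypothetical; the widths are target-dependent): with
  /// a 64-to-32-bit bypass width, a 64-bit division such as
  /// @code
  ///   %q = udiv i64 %a, %b
  /// @endcode
  /// is rewritten into a run-time guarded form that uses a 32-bit divide
  /// whenever both operands happen to fit in 32 bits.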
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
       TLI->getBypassSlowDivWidths();
    for (Function::iterator I = F.begin(); I != F.end(); ++I)
      EverMadeChange |= bypassSlowDivision(F, I, BypassWidths);
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then ISel may not be able
  // to handle it properly.  ISel will drop the llvm.dbg.value if it cannot
  // find a node corresponding to the value.
  EverMadeChange |= PlaceDbgValues(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = I++;
      MadeChange |= OptimizeBlock(*BB);
    }
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
      MadeChange |= ConstantFoldTerminator(BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= EliminateFallThrough(F);

    if (MadeChange)
      ModifiedDT = true;
    EverMadeChange |= MadeChange;
  }

  if (ModifiedDT && DT)
    DT->DT->recalculate(F);

  return EverMadeChange;
}

/// EliminateFallThrough - Merge basic blocks which are connected
/// by a single edge, where one of the basic blocks has a single successor
/// pointing to the other basic block, which has a single predecessor.
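///
/// For illustration, a hypothetical CFG fragment (block names invented)
/// that would be collapsed into a single block:
/// @code
/// bb0:
///   %x = add i32 %a, %b
///   br label %bb1
/// bb1:                                        ; preds = %bb0
///   %y = mul i32 %x, 2
///   ret i32 %y
/// @endcode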
bool CodeGenPrepare::EliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB;
    }
  }
  return Changed;
}

/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch.  Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel.  Start by eliminating these blocks so we can split them the way we
/// want them.
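///
/// A hypothetical example of such a block (names invented): %split contains
/// only a phi and an unconditional branch, so it can be folded into %merge:
/// @code
/// split:                                      ; preds = %left, %right
///   %p = phi i32 [ 0, %left ], [ 1, %right ]
///   br label %merge
/// merge:                                      ; preds = %split, %other
///   %q = phi i32 [ %p, %split ], [ 2, %other ]
///   ret i32 %q
/// @endcode
/// After elimination, %left and %right branch straight to %merge and %q
/// receives their incoming values directly.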
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
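///
/// A sketch of the kind of conflict this rejects (names invented): if some
/// block %pred branches to both BB and DestBB, a phi in DestBB may carry
/// different values on the two paths:
/// @code
/// destbb:                                     ; preds = %pred, %bb
///   %v = phi i32 [ 0, %pred ], [ 1, %bb ]
/// @endcode
/// Merging %bb into %destbb would require %v to take both 0 and 1 from
/// %pred, so the merge is refused.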
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there is a more complex condition (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is inside DestBB and is a PHINode, check its incoming
      // values.  If an incoming value is not from BB, this is a complex
      // condition (e.g. a preheader) we want to avoid here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only PHIs and
/// an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, DestBB has multiple predecessors.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have from BB's preds.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (DT && !ModifiedDT) {
    BasicBlock *BBIDom  = DT->getNode(BB)->getIDom()->getBlock();
    BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock();
    BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom);
    DT->changeImmediateDominator(DestBB, NewIDom);
    DT->eraseNode(BB);
  }
  if (PFI) {
    PFI->replaceAllUses(BB, DestBB);
    PFI->removeEdge(ProfileInfo::getEdge(BB, DestBB));
  }
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
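/// For illustration, a hypothetical noop cast sunk to its user's block
/// (names invented):
/// @code
/// def:
///   %c = bitcast i8* %p to i32*
///   br i1 %cond, label %use, label %other
/// use:
///   %v = load i32* %c
/// @endcode
/// A copy of the bitcast is inserted at the top of %use and the load is
/// rewritten to use it, so no extra virtual register stays live across the
/// edge just to carry the cast.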
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // If this is an fp<->int conversion, it isn't a noop copy; bail out.
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced.  This is
/// a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
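///
/// A minimal sketch (names invented) of a cmp being sunk to its user block:
/// @code
/// def:
///   %c = icmp eq i32 %a, %b
///   br label %use
/// use:
///   br i1 %c, label %t, label %f
/// @endcode
/// A duplicate icmp is created in %use so the i1 result need not be carried
/// across the block boundary in a virtual register.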
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmp - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
  void replaceCall(Value *With) {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const {
    if (ConstantInt *SizeCI =
            dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
      return SizeCI->isAllOnesValue();
    return false;
  }
};
} // end anonymous namespace

bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.  If we find an inline asm expression,
  // and the target knows how to lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator.  Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);

    replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getDataLayout() : 0,
                                  TLInfo, ModifiedDT ? 0 : DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }

  if (II && TLI) {
    SmallVector<Value*, 2> PtrOps;
    Type *AccessTy;
    if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy))
      while (!PtrOps.empty())
        if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy))
          return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need DataLayout from here on out.
  const DataLayout *TD = TLI ? TLI->getDataLayout() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD, TLInfo);
}

/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) {
  if (!TLI)
    return false;

  ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RI)
    return false;

  PHINode *PN = 0;
  BitCastInst *BCI = 0;
  Value *V = RI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    PN = dyn_cast<PHINode>(V);
    if (!PN)
      return false;
  }

  if (PN && PN->getParent() != BB)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  // See llvm::isInTailCallPosition().
  const Function *F = BB->getParent();
  AttributeSet CallerAttrs = F->getAttributes();
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  // Make sure there are no instructions between the PHI and return, or that
  // the return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI == BCI)
      // Also skip over the bitcast.
      ++BI;
    if (&*BI != RI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RI)
      return false;
  }

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI))
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    AttributeSet CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {

/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
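///
/// For example (hypothetical values), the x86-style address
/// [@someglobal + 4 + %base + 2*%idx] would be represented as:
/// @code
///   BaseGV = @someglobal, BaseOffs = 4,
///   HasBaseReg = true, BaseReg = %base,
///   Scale = 2, ScaledReg = %idx
/// @endcode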
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
  void print(raw_ostream &OS) const;
  void dump() const;

  bool operator==(const ExtAddrMode& O) const {
    return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
           (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
           (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
  }
};

static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs) {
    OS << (NeedPlus ? " + " : "") << BaseOffs;
    NeedPlus = true;
  }

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    WriteAsOperand(OS, BaseReg, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    WriteAsOperand(OS, ScaledReg, /*PrintType=*/false);
    NeedPlus = true;
  }

  OS << ']';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif


/// \brief A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
  SmallVectorImpl<Instruction*> &AddrModeInsts;
  const TargetLowering &TLI;

  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  /// the memory instruction that we're computing this address for.
  Type *AccessTy;
  Instruction *MemoryInst;

  /// AddrMode - This is the addressing mode that we're building up.  This is
  /// part of the return value of this addressing mode matching stuff.
  ExtAddrMode &AddrMode;

  /// IgnoreProfitability - This is set to true when we should not do
  /// profitability checks.  When true, IsProfitableToFoldIntoAddressingMode
  /// always returns true.
  bool IgnoreProfitability;

  AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI,
                        const TargetLowering &T, Type *AT,
                        Instruction *MI, ExtAddrMode &AM)
    : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM) {
    IgnoreProfitability = false;
  }
public:

  /// Match - Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy.  This returns a list of involved
  /// instructions in AddrModeInsts.
  static ExtAddrMode Match(Value *V, Type *AccessTy,
                           Instruction *MemoryInst,
                           SmallVectorImpl<Instruction*> &AddrModeInsts,
                           const TargetLowering &TLI) {
    ExtAddrMode Result;

    bool Success =
      AddressingModeMatcher(AddrModeInsts, TLI, AccessTy,
                            MemoryInst, Result).MatchAddr(V, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");
    return Result;
  }
private:
  bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool MatchAddr(Value *V, unsigned Depth);
  bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth);
  bool IsProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
};

/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
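///
/// As a worked example (hypothetical): if AddrMode already holds
/// ScaledReg = %x with Scale = 4 and this is called with ScaleReg = %x and
/// Scale = 3, the scales are combined, folding X*4 + X*3 into X*7, provided
/// the target accepts a scale of 7.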
bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode.  Just process that directly.
  if (Scale == 1)
    return MatchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
  ConstantInt *CI = 0; Value *AddLHS = 0;
  if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}

/// MightBeFoldableInst - This is a little filter, which returns true if an
/// addressing computation involving I might be folded into a load/store
/// accessing it.  This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// MatchOperationAddr - Given an instruction or constant expr, see if we can
/// fold the operation into the addressing mode.  If so, update the addressing
/// mode and return true, otherwise return false without modifying AddrMode.
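///
/// As a hypothetical example, matching the address of this load:
/// @code
///   %a = add i64 %base, 16
///   %p = inttoptr i64 %a to i32*
///   %v = load i32* %p
/// @endcode
/// would look through the inttoptr (assuming i64 is pointer sized here) and
/// fold the add, yielding an addressing mode with BaseReg = %base and
/// BaseOffs = 16, if that is legal for the target.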
bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return MatchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr:
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
        TLI.getPointerTy())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
        // Don't touch identity bitcasts.  These were probably put here by LSR,
        // and we don't want to mess around with them.  Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
        MatchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
    if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
        MatchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    break;
  }
  //case Instruction::Or:
  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP.  Check that it contains only constant offsets and at most
    // one variable offset.
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    const DataLayout *TD = TLI.getDataLayout();
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx =
          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset.  In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 ||
          TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
        // Check to see if we can fold the base pointer in too.
        if (MatchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  }
  return false;
}

/// MatchAddr - If we can, try to add the value of 'Addr' into the current
/// addressing mode.  If Addr can't be added to AddrMode this returns false and
/// leaves AddrMode unmodified.  This assumes that Addr is either a pointer type
/// or intptr_t for the target.
///
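/// For illustration (hypothetical target): if Addr is the constant 40,
/// MatchAddr simply folds it into BaseOffs when the resulting mode is legal;
/// if Addr is a GlobalValue it is tried as BaseGV; for any other value the
/// fallback is to use Addr itself as the base register, i.e. a plain [reg]
/// mode.
///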
bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (AddrMode.BaseGV == 0) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
        return true;
      AddrMode.BaseGV = 0;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    if (MatchOperationAddr(I, I->getOpcode(), Depth)) {
      // Okay, it's possible to fold this.  Check to see if it is actually
      // *profitable* to do so.  We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = 0;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = 0;
  }
  // Couldn't match.
  return false;
}

/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands.  If so, return true, otherwise
/// return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI) {
  TargetLowering::AsmOperandInfoVector TargetConstraints =
    TLI.ParseConstraints(ImmutableCallSite(CI));
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}

/// FindAllMemoryUses - Recursively walk all the uses of I until we find a
/// memory use.  If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(Instruction *I,
                SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
                              SmallPtrSet<Instruction*, 16> &ConsideredInsts,
                              const TargetLowering &TLI) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I))
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    User *U = *UI;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      unsigned opNo = UI.getOperandNo();
      if (opNo == 0) return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, opNo));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(U)) {
      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (!IA) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(cast<Instruction>(U), MemoryUses, ConsideredInsts,
                          TLI))
      return true;
  }

  return false;
}

/// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
/// the use site that we're folding it into.  If so, there is no cost to
/// include it in the addressing mode.  KnownLive1 and KnownLive2 are two values
/// that we know are live at the instruction already.
bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant sized alloca in the entry block, it is live; it is
  // just a reference to the stack/frame pointer, which is live for the whole
  // function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block.  If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  return Val->isUsedInBasicBlock(MemoryInst->getParent());
}

/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
/// mode of the machine to fold the specified instruction into a load or store
/// that ultimately uses it.  However, the specified instruction has multiple
/// uses.  Given this, it may actually increase register pressure to fold it
/// into the load.  For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case.  This would make Y die earlier.
bool AddressingModeMatcher::
IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded.  Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = 0;
  if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = 0;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (BaseReg == 0 && ScaledReg == 0)
    return true;

  // If all uses of this instruction are ultimately load/store/inlineasm's,
  // check to see if their addressing modes will include this instruction.  If
  // so, we can fold it into all uses, so it doesn't matter if it has multiple
  // uses.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
    return false;  // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these uses and see if they could
  // *actually* fold the instruction.
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use.  If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    if (!Address->getType()->isPointerTy())
      return false;
    Type *AddressAccessTy =
      cast<PointerType>(Address->getType())->getElementType();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
                                  MemoryInst, Result);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.MatchAddr(Address, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");

    // If the match didn't cover I, then it won't be shared by it.
    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
                  I) == MatchedAddrModeInsts.end())
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}

} // end anonymous namespace

/// IsNonLocalValue - Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// OptimizeMemoryInst - Load and Store Instructions often have
/// addressing modes that can do significant amounts of computation.  As such,
/// instruction selection will try to get the load or store to do as much
/// computation as possible for the program.  The problem is that isel can only
/// see within a single block.  As such, we sink as much legal addressing mode
/// stuff into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
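///
/// For illustration, a rough sketch of the effect on hypothetical IR (names
/// invented for this example):
///
///   bb1:
///     %idx = add i64 %i, 1
///     %ptr = getelementptr i32* %base, i64 %idx
///     br label %bb2
///   bb2:
///     %v = load i32* %ptr
///
/// The address computation feeding the load is rematerialized as explicit
/// "sunkaddr" arithmetic next to the load in bb2, so that instruction
/// selection, which only sees bb2, can fold it into the load's addressing
/// mode.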
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        Type *AccessTy) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes.  This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value*, 8> worklist;
  SmallPtrSet<Value*, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI nodes, and ensure that
  // the addressing modes obtained from the non-PHI roots of the graph
  // are all equivalent.
  Value *Consensus = 0;
  unsigned NumUsesConsensus = 0;
  bool IsNumUsesConsensusValid = false;
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode;
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // Break use-def graph loops.
    if (!Visited.insert(V)) {
      Consensus = 0;
      break;
    }

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
        worklist.push_back(P->getIncomingValue(i));
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed.
    SmallVector<Instruction*, 16> NewAddrModeInsts;
    ExtAddrMode NewAddrMode =
      AddressingModeMatcher::Match(V, AccessTy, MemoryInst,
                                   NewAddrModeInsts, *TLI);

    // This check is broken into two cases with very similar code to avoid using
    // getNumUses() as much as possible. Some values have a lot of uses, so
    // calling getNumUses() unconditionally caused a significant compile-time
    // regression.
    if (!Consensus) {
      Consensus = V;
      AddrMode = NewAddrMode;
      AddrModeInsts = NewAddrModeInsts;
      continue;
    } else if (NewAddrMode == AddrMode) {
      if (!IsNumUsesConsensusValid) {
        NumUsesConsensus = Consensus->getNumUses();
        IsNumUsesConsensusValid = true;
      }

      // Ensure that the obtained addressing mode is equivalent to that obtained
      // for all other roots of the PHI traversal.  Also, when choosing one
      // such root as representative, select the one with the most uses in order
      // to keep the cost modeling heuristics in AddressingModeMatcher
      // applicable.
      unsigned NumUses = V->getNumUses();
      if (NumUses > NumUsesConsensus) {
        Consensus = V;
        NumUsesConsensus = NumUses;
        AddrModeInsts = NewAddrModeInsts;
      }
      continue;
    }

    Consensus = 0;
    break;
  }

  // If the addressing mode couldn't be determined, or if multiple different
  // ones were determined, bail out now.
  if (!Consensus) return false;

  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
  bool AnyNonLocal = false;
  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
  }

  // If all the instructions matched are already in this BB, don't do anything.
  if (!AnyNonLocal) {
    DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode << "\n");
    return false;
  }

  // Insert this computation right after this user.  Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expr are
  // guaranteed to happen later.
  IRBuilder<> Builder(MemoryInst);

  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block.  If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
  } else {
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    Type *IntPtrTy =
          TLI->getDataLayout()->getIntPtrType(AccessTy->getContext());

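    // The expansion below materializes AddrMode as explicit integer
    // arithmetic:
    //   addr = BaseReg + Scale*ScaledReg + BaseGV + BaseOffs
    // For example (a hypothetical sketch), an AddrMode with BaseReg=%b,
    // Scale=4, ScaledReg=%i and BaseOffs=8 would be emitted roughly as:
    //   %sunkaddr  = ptrtoint i32* %b to i64
    //   %sunkaddr1 = mul i64 %i, 4
    //   %sunkaddr2 = add i64 %sunkaddr, %sunkaddr1
    //   %sunkaddr3 = add i64 %sunkaddr2, 8
    //   %sunkaddr4 = inttoptr i64 %sunkaddr3 to i32*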
    Value *Result = 0;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      if (V->getType() != IntPtrTy)
        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
      } else {
        V = Builder.CreateSExt(V, IntPtrTy, "sunkaddr");
      }
      if (AddrMode.Scale != 1)
        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
                              "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = Builder.CreateAdd(Result, V, "sunkaddr");
      else
        Result = V;
    }

    if (Result == 0)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
  }

  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);

  // If we have no uses, recursively delete the value and all dead instructions
  // using it.
  if (Repl->use_empty()) {
    // This can cause recursive deletion, which can invalidate our iterator.
    // Use a WeakVH to hold onto it in case this happens.
    WeakVH IterHandle(CurInstIterator);
    BasicBlock *BB = CurInstIterator->getParent();

    RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);

    if (IterHandle != CurInstIterator) {
      // If the iterator instruction was recursively deleted, start over at the
      // start of the block.
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    } else {
      // This address is now available for reassignment, so erase the table
      // entry; we don't want to match some completely different instruction.
      SunkAddrs[Addr] = 0;
    }
  }
  ++NumMemoryInsts;
  return true;
}

/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computation into the block when
/// possible / profitable.
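///
/// For illustration (a hypothetical sketch): in a call such as
///
///   call void asm "movl $0, %eax", "*m"(i32* %p)
///
/// the "*m" constraint marks an indirect memory operand, so %p is treated
/// like a load/store address and handed to OptimizeMemoryInst.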
bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  TargetLowering::AsmOperandInfoVector
    TargetConstraints = TLI->ParseConstraints(CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType());
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
/// basic block as the load, unless conditions are unfavorable. This allows
/// SelectionDAG to fold the extend into the load.
///
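/// For illustration, a rough sketch on hypothetical IR:
///
///   bb1:
///     %x = load i16* %ptr
///     br label %bb2
///   bb2:
///     %y = sext i16 %x to i32
///
/// Moving the sext next to the load in bb1 lets isel select a single
/// sign-extending load (SEXTLOAD) where the target supports it.
///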
bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
  // Look for a load being extended.
  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI) return false;

  // If they're already in the same block, there's nothing to do.
  if (LI->getParent() == I->getParent())
    return false;

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() &&
      TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) ||
              !TLI->isTypeLegal(TLI->getValueType(I->getType()))) &&
      !TLI->isTruncateFree(I->getType(), LI->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(I))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(I) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }
  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
    return false;

  // Move the extend into the same block as the load, so that SelectionDAG
  // can fold it.
  I->removeFromParent();
  I->insertAfter(LI);
  ++NumExtsMoved;
  return true;
}

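/// OptimizeExtUses - If both the source and the result of a {s|z}ext are live
/// out of the defining block, rewrite out-of-block uses of the source in terms
/// of the extended value plus a free truncate.  A rough sketch on hypothetical
/// IR:
///
///   bb1:
///     %x = ...                        ; i16
///     %e = sext i16 %x to i32
///     br label %bb2
///   bb2:
///     use(%x)          -->   %t = trunc i32 %e to i16
///                            use(%t)
///
/// Only the extended value then needs to stay live across blocks.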
bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with the result of the extension.
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
      return false;
  }

  // InsertedTruncs - Insert at most one trunc in each block.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    TheUse = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}

/// isFormingBranchFromSelectProfitable - Returns true if a SelectInst should be
/// turned into an explicit branch.
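///
/// For illustration (a hypothetical sketch): in
///
///   %v = load i32* %p
///   %c = icmp eq i32 %v, 0
///   %r = select i1 %c, i32 %a, i32 %b
///
/// the compare has a single-use load operand and is itself used only by the
/// select, so forming a branch is considered profitable: a predicted branch
/// lets the CPU run ahead instead of a cmov stalling on the load from memory.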
static bool isFormingBranchFromSelectProfitable(SelectInst *SI) {
  // FIXME: This should use the same heuristics as IfConversion to determine
  // whether a select is better represented as a branch.  This requires that
  // branch probability metadata is preserved for the select, which is not the
  // case currently.

  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());

  // If the branch is predicted right, an out-of-order CPU can avoid blocking on
  // the compare.  Emit cmovs on compares with a memory operand as branches to
  // avoid stalls on the load from memory.  If the compare has more than one use
  // there's probably another cmov or setcc around so it's not worth emitting a
  // branch.
  if (!Cmp)
    return false;

  Value *CmpOp0 = Cmp->getOperand(0);
  Value *CmpOp1 = Cmp->getOperand(1);

  // We check that the memory operand has one use to avoid uses of the loaded
  // value directly after the compare, making branches unprofitable.
  return Cmp->hasOneUse() &&
         ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) ||
          (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse()));
}

/// If we have a SelectInst that will likely profit from branch prediction,
/// turn it into a branch.
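///
/// A rough sketch of the rewrite on hypothetical IR:
///
///   %r = select i1 %c, i32 %a, i32 %b
///
/// becomes
///
///   start:
///     br i1 %c, label %select.end, label %select.mid
///   select.mid:
///     br label %select.end
///   select.end:
///     %r = phi i32 [ %a, %start ], [ %b, %select.mid ]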
bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) {
  bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);

  // Can we convert the 'select' to a conditional branch?
  if (DisableSelectToBranch || OptSize || !TLI || VectorCond)
    return false;

  TargetLowering::SelectSupportKind SelectKind;
  if (VectorCond)
    SelectKind = TargetLowering::VectorMaskSelect;
  else if (SI->getType()->isVectorTy())
    SelectKind = TargetLowering::ScalarCondVectorVal;
  else
    SelectKind = TargetLowering::ScalarValSelect;

  // Do we have efficient codegen support for this kind of select?
  if (TLI->isSelectSupported(SelectKind)) {
    // We have efficient codegen support for the select instruction.
    // Check if it is profitable to keep this 'select'.
    if (!TLI->isPredictableSelectExpensive() ||
        !isFormingBranchFromSelectProfitable(SI))
      return false;
  }

  ModifiedDT = true;

  // First, we split the block containing the select into 2 blocks.
  BasicBlock *StartBlock = SI->getParent();
  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI));
  BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");

  // Create a new block serving as the landing pad for the branch.
  BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid",
                                             NextBlock->getParent(), NextBlock);

  // Move the unconditional branch from the block with the select in it into our
  // landing pad block.
  StartBlock->getTerminator()->eraseFromParent();
  BranchInst::Create(NextBlock, SmallBlock);

  // Insert the real conditional branch based on the original condition.
  BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI);

  // The select itself is replaced with a PHI Node.
  PHINode *PN = PHINode::Create(SI->getType(), 2, "", NextBlock->begin());
  PN->takeName(SI);
  PN->addIncoming(SI->getTrueValue(), StartBlock);
  PN->addIncoming(SI->getFalseValue(), SmallBlock);
  SI->replaceAllUsesWith(PN);
  SI->eraseFromParent();

  // Instruct OptimizeBlock to skip to the next block.
  CurInstIterator = StartBlock->end();
  ++NumSelectsExpanded;
  return true;
}

bool CodeGenPrepare::OptimizeInst(Instruction *I) {
  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up.  If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = SimplifyInstruction(P)) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded.  The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop).  If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      bool MadeChange = MoveExtToFormExtLoad(I);
      return MadeChange | OptimizeExtUses(I);
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return OptimizeCmpExpression(CI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType());
    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, so must its result -> BitCast
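      // For illustration: "%q = getelementptr {i32}* %p, i32 0, i32 0"
      // addresses the same memory as %p, so it becomes
      // "%q = bitcast {i32}* %p to i32*".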
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      OptimizeInst(NC);
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return OptimizeCallInst(CI);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return OptimizeSelectInst(SI);

  return false;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end())
    MadeChange |= OptimizeInst(CurInstIterator++);

  MadeChange |= DupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If an llvm.dbg.value is far away from the value it describes, ISel may not
// be able to handle it properly.  ISel drops the llvm.dbg.value if it cannot
// find a node corresponding to the value.
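//
// For illustration (a hypothetical sketch): given
//
//   %a = add i32 %x, 1
//   ...
//   call void @llvm.dbg.value(metadata !{i32 %a}, i64 0, metadata !var)
//
// the dbg.value call is moved to sit immediately after the definition of %a,
// keeping the debug info adjacent to the value it describes.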
bool CodeGenPrepare::PlaceDbgValues(Function &F) {
  bool MadeChange = false;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
    Instruction *PrevNonDbgInst = NULL;
    for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
      Instruction *Insn = BI; ++BI;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      if (!DVI) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}
