CodeGenPrepare.cpp revision 950d3db5f478a84242a90cafce0d8dfc4f8b1152
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/ValueHandle.h"
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");

namespace {
  class CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;
    DominatorTree *DT;
    ProfileInfo *PFI;

    /// CurInstIterator - As we scan instructions optimizing them, this is the
    /// next instruction to optimize.  Xforms that can invalidate this should
    /// update it.
    BasicBlock::iterator CurInstIterator;

    // Keeps track of non-local addresses that have been sunk into a block.
    // This allows us to avoid inserting duplicate code for blocks with
    // multiple loads/stores of the same address.
    DenseMap<Value*, Value*> SunkAddrs;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetLowering *tli = 0)
      : FunctionPass(ID), TLI(tli) {
        initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
      }
    bool runOnFunction(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<ProfileInfo>();
    }

  private:
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeInst(Instruction *I);
    bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy);
    bool OptimizeInlineAsmInst(CallInst *CS);
    bool OptimizeCallInst(CallInst *CI);
    bool MoveExtToFormExtLoad(Instruction *I);
    bool OptimizeExtUses(Instruction *I);
  };
}

char CodeGenPrepare::ID = 0;
INITIALIZE_PASS(CodeGenPrepare, "codegenprepare",
                "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
  return new CodeGenPrepare(TLI);
}

bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  DT = getAnalysisIfAvailable<DominatorTree>();
  PFI = getAnalysisIfAvailable<ProfileInfo>();
  // First pass, eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      MadeChange |= OptimizeBlock(*BB);
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  return EverMadeChange;
}

/// EliminateMostlyEmptyBlocks - Eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch.  Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel.  Start by eliminating these blocks so we can split edges the way we
/// want them.
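///
/// As an illustrative sketch (hypothetical IR, not from the original source),
/// a mostly-empty block of the kind removed here might look like:
///
///   bb:                                       ; preds = %entry
///     %p = phi i32 [ %x, %entry ]
///     br label %dest
///
/// Merging bb into %dest forwards %p's incoming values directly into the PHI
/// nodes of %dest.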
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
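///
/// For illustration (hypothetical IR, not from the original source): if BB
/// contains "%p = phi i32 ..." and some non-PHI instruction in DestBB uses
/// %p, the merge is refused, because after merging %p's value would differ
/// per incoming edge and could not feed a single non-PHI use.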
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there is a more complex condition (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is a PHINode inside DestBB, check its incoming values. If an
      // incoming value is defined in BB but does not arrive via the edge from
      // BB, this is a complex condition (e.g. preheaders) we want to avoid.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the blocks.
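  //
  // Concretely (a hypothetical example, not from the original source): if a
  // block P branches to both BB and DestBB, a PHI in BB may carry %a from P
  // while a PHI in DestBB carries %b from P.  After merging, one PHI in
  // DestBB would need two different values for the edge from P, which is
  // impossible, so the merge must be refused.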
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}


/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only PHI nodes
/// and an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (DT) {
    BasicBlock *BBIDom  = DT->getNode(BB)->getIDom()->getBlock();
    BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock();
    BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom);
    DT->changeImmediateDominator(DestBB, NewIDom);
    DT->eraseNode(BB);
  }
  if (PFI) {
    PFI->replaceAllUses(BB, DestBB);
    PFI->removeEdge(ProfileInfo::getEdge(BB, DestBB));
  }
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
///
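/// As an illustrative sketch (hypothetical IR, not from the original source):
/// given a bitcast in one block that is used in another,
///
///   bb1:
///     %c = bitcast i32* %p to i8*
///     br label %bb2
///   bb2:
///     store i8 0, i8* %c
///
/// a copy of the cast is inserted into bb2 and the store rewritten to use it,
/// so the original cast need not live across the block boundary in a virtual
/// register.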
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // Determine whether this is a noop copy by examining the source and
  // destination value types.
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // An fp<->int conversion is never a noop copy.
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to.  This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in.  For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}

/// OptimizeCmpExpression - Sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced.  This
/// is a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
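///
/// For illustration (hypothetical IR, not from the original source):
///
///   bb1:
///     %c = icmp eq i32 %x, 0
///     br label %bb2
///   bb2:
///     br i1 %c, label %t, label %f
///
/// A copy of the icmp is inserted into bb2 so isel can fuse the compare with
/// the branch instead of carrying the i1 across the edge in a register.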
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmp - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty())
    CI->eraseFromParent();

  return MadeChange;
}

namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
  void replaceCall(Value *With) {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const {
    if (ConstantInt *SizeCI =
          dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
      return SizeCI->isAllOnesValue();
    return false;
  }
};
} // end anonymous namespace

bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
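  // For instance (a hypothetical example, not from the original source):
  //   %sz = call i64 @llvm.objectsize.i64(i8* %p, i1 false)
  // is folded to the conservative constant -1 (or 0 when the second argument
  // is true, i.e. when the minimum size is requested).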
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    const Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);

    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator.  Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);

    ReplaceAndSimplifyAllUses(CI, RetVal, TLI ? TLI->getTargetData() : 0, DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need TargetData from here on out.
  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls.  This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize.  Anything else
  // should be left alone.
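  // Sketch of the effect (a hypothetical example, not from the original
  // source): a fortified call whose size argument is -1, such as
  //   call i8* @__memcpy_chk(i8* %d, i8* %s, i64 %n, i64 -1)
  // is lowered to an ordinary memcpy, since a size of -1 means "don't know"
  // and the check can never fail.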
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD);
}

//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

/// IsNonLocalValue - Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}

/// OptimizeMemoryInst - Load and Store Instructions often have
/// addressing modes that can do significant amounts of computation.  As such,
/// instruction selection will try to get the load or store to do as much
/// computation as possible for the program.  The problem is that isel can only
/// see within a single block.  As such, we sink as much legal addressing mode
/// stuff into the block as possible.
///
/// This method is used to optimize both load/store and inline asms with memory
/// operands.
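///
/// Illustrative sketch (hypothetical IR, not from the original source): if
/// the address feeding a load is computed in another block,
///
///   bb1:
///     %a = getelementptr i8* %base, i64 %ofs
///     br label %bb2
///   bb2:
///     %v = load i8* %a
///
/// the matched addressing computation is re-materialized next to the load as
/// "sunkaddr" instructions (ptrtoint/add/inttoptr below), so isel can fold it
/// into the load's memory operand.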
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                        const Type *AccessTy) {
  Value *Repl = Addr;

  // Try to collapse single-value PHI nodes.  This is necessary to undo
  // unprofitable PRE transformations.
  SmallVector<Value*, 8> worklist;
  SmallPtrSet<Value*, 16> Visited;
  worklist.push_back(Addr);

  // Use a worklist to iteratively look through PHI nodes, and ensure that
  // the addressing modes obtained from the non-PHI roots of the graph
  // are equivalent.
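  //
  // For example (hypothetical IR, not from the original source): if Addr is
  //   %a = phi i8* [ %g1, %bb1 ], [ %g2, %bb2 ]
  // and %g1 and %g2 match the same addressing mode, the PHI is looked
  // through and one sunk computation can serve both roots.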
  Value *Consensus = 0;
  unsigned NumUsesConsensus = 0;
  bool IsNumUsesConsensusValid = false;
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode;
  while (!worklist.empty()) {
    Value *V = worklist.back();
    worklist.pop_back();

    // Break use-def graph loops.
    if (Visited.count(V)) {
      Consensus = 0;
      break;
    }

    Visited.insert(V);

    // For a PHI node, push all of its incoming values.
    if (PHINode *P = dyn_cast<PHINode>(V)) {
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
        worklist.push_back(P->getIncomingValue(i));
      continue;
    }

    // For non-PHIs, determine the addressing mode being computed.
    SmallVector<Instruction*, 16> NewAddrModeInsts;
    ExtAddrMode NewAddrMode =
      AddressingModeMatcher::Match(V, AccessTy, MemoryInst,
                                   NewAddrModeInsts, *TLI);

    // This check is broken into two cases with very similar code to avoid
    // using getNumUses() as much as possible. Some values have a lot of uses,
    // so calling getNumUses() unconditionally caused a significant
    // compile-time regression.
    if (!Consensus) {
      Consensus = V;
      AddrMode = NewAddrMode;
      AddrModeInsts = NewAddrModeInsts;
      continue;
    } else if (NewAddrMode == AddrMode) {
      if (!IsNumUsesConsensusValid) {
        NumUsesConsensus = Consensus->getNumUses();
        IsNumUsesConsensusValid = true;
      }

      // Ensure that the obtained addressing mode is equivalent to that
      // obtained for all other roots of the PHI traversal.  Also, when
      // choosing one such root as representative, select the one with the
      // most uses in order to keep the cost modeling heuristics in
      // AddressingModeMatcher applicable.
      unsigned NumUses = V->getNumUses();
      if (NumUses > NumUsesConsensus) {
        Consensus = V;
        NumUsesConsensus = NumUses;
        AddrModeInsts = NewAddrModeInsts;
      }
      continue;
    }

    Consensus = 0;
    break;
  }

  // If the addressing mode couldn't be determined, or if multiple different
  // ones were determined, bail out now.
  if (!Consensus) return false;

  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
  bool AnyNonLocal = false;
  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
  }

  // If all the instructions matched are already in this BB, don't do anything.
  if (!AnyNonLocal) {
    DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode << "\n");
    return false;
  }

  // Insert this computation right after this user.  Since our caller is
  // scanning from the top of the BB to the bottom, reuses of the expression
  // are guaranteed to happen after it.
  BasicBlock::iterator InsertPt = MemoryInst;

  // Now that we've determined the addressing expression we want to use and
  // know that we have to sink it into this block, check to see if we have
  // already done this for some other load/store instr in this block.  If so,
  // reuse the computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
  } else {
    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
                 << *MemoryInst);
    const Type *IntPtrTy =
          TLI->getTargetData()->getIntPtrType(AccessTy->getContext());

    Value *Result = 0;

    // Start with the base register. Do this first so that subsequent address
    // matching finds it last, which will prevent it from trying to match it
    // as the scaled value in case it happens to be a mul. That would be
    // problematic if we've sunk a different mul for the scale, because then
    // we'd end up sinking both muls.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType()->isPointerTy())
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      if (V->getType() != IntPtrTy)
        V = CastInst::CreateIntegerCast(V, IntPtrTy, /*isSigned=*/true,
                                        "sunkaddr", InsertPt);
      Result = V;
    }

    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (V->getType()->isPointerTy()) {
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = new TruncInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else {
        V = new SExtInst(V, IntPtrTy, "sunkaddr", InsertPt);
      }
      if (AddrMode.Scale != 1)
        V = BinaryOperator::CreateMul(V, ConstantInt::get(IntPtrTy,
                                                          AddrMode.Scale),
                                      "sunkaddr", InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = new PtrToIntInst(AddrMode.BaseGV, IntPtrTy, "sunkaddr",
                                  InsertPt);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    if (Result == 0)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",
                                  InsertPt);
  }

  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);

  if (Repl->use_empty()) {
    RecursivelyDeleteTriviallyDeadInstructions(Repl);
    // This address is now available for reassignment, so erase the table
    // entry; we don't want to match some completely different instruction.
    SunkAddrs[Addr] = 0;
  }
  ++NumMemoryInsts;
  return true;
}

/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computations into the block when
/// possible / profitable.
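///
/// A hypothetical example (not from the original source): given
///   call void asm sideeffect "prefetcht0 $0", "*m"(i32* %addr)
/// the indirect memory operand's address (%addr) is handed to
/// OptimizeMemoryInst just as a load or store address would be.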
bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
  bool MadeChange = false;

  TargetLowering::AsmOperandInfoVector
    TargetConstraints = TLI->ParseConstraints(CS);
  unsigned ArgNo = 0;
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

    // Compute the constraint code and ConstraintType to use.
    TLI->ComputeConstraintToUse(OpInfo, SDValue());

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.isIndirect) {
      Value *OpVal = CS->getArgOperand(ArgNo++);
      MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType());
    } else if (OpInfo.Type == InlineAsm::isInput)
      ArgNo++;
  }

  return MadeChange;
}

/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
/// basic block as the load, unless conditions are unfavorable. This allows
/// SelectionDAG to fold the extend into the load.
///
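/// For illustration (hypothetical IR, not from the original source):
///
///   bb1:
///     %v = load i8* %p
///     br label %bb2
///   bb2:
///     %e = sext i8 %v to i32
///
/// The sext is moved next to the load in bb1, so isel can select a single
/// sign-extending load (SEXTLOAD) where the target supports one.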
bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
  // Look for a load being extended.
  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
  if (!LI) return false;

  // If they're already in the same block, there's nothing to do.
  if (LI->getParent() == I->getParent())
    return false;

  // If the load has other users and the truncate is not free, this probably
  // isn't worthwhile.
  if (!LI->hasOneUse() &&
      TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) ||
              !TLI->isTypeLegal(TLI->getValueType(I->getType()))) &&
      !TLI->isTruncateFree(I->getType(), LI->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(I))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(I) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }
  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
    return false;

  // Move the extend into the same block as the load, so that SelectionDAG
  // can fold it.
  I->removeFromParent();
  I->insertAfter(LI);
  ++NumExtsMoved;
  return true;
}

bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
  BasicBlock *DefBB = I->getParent();

  // If the result of a {s|z}ext and its source are both live out, rewrite all
  // other uses of the source with the result of the extension.
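  //
  // Sketch (hypothetical IR, not from the original source): given
  //   %ext = zext i16 %src to i32
  // with both %src and %ext used in other blocks, out-of-block uses of %src
  // are rewritten to "trunc i32 %ext to i16", so only %ext stays live across
  // block boundaries; the trunc must be free, as checked below.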
  Value *Src = I->getOperand(0);
  if (Src->hasOneUse())
    return false;

  // Only do this xform if truncating is free.
  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
    return false;

  // Only safe to perform the optimization if the source is also defined in
  // this block.
  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
    return false;

  bool DefIsLiveOut = false;
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    DefIsLiveOut = true;
    break;
  }
  if (!DefIsLiveOut)
    return false;

  // Make sure none of the uses are PHI nodes.
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;
    // Be conservative. We don't want this xform to end up introducing
    // reloads just before load / store instructions.
    if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
      return false;
  }

  // InsertedTruncs - Only insert one trunc in each block.
  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

  bool MadeChange = false;
  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
       UI != E; ++UI) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this ext is used in.
    BasicBlock *UserBB = User->getParent();
    if (UserBB == DefBB) continue;

    // Both src and def are live in this block. Rewrite the use.
    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];

    if (!InsertedTrunc) {
      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();

      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
    }

    // Replace a use of the {s|z}ext source with a use of the result.
    TheUse = InsertedTrunc;
    ++NumExtUses;
    MadeChange = true;
  }

  return MadeChange;
}

bool CodeGenPrepare::OptimizeInst(Instruction *I) {
  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up.  If we detect such a
    // trivial PHI, go ahead and zap it here.
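    // E.g. (hypothetical IR): "%p = phi i32 [ %x, %a ], [ %x, %b ]"
    // simplifies to %x, and the PHI can be zapped.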
    if (Value *V = SimplifyInstruction(P)) {
      P->replaceAllUsesWith(V);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return false;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded.  The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop).  If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return false;

    if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      bool MadeChange = MoveExtToFormExtLoad(I);
      return MadeChange | OptimizeExtUses(I);
    }
    return false;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return OptimizeCmpExpression(CI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
    return false;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (TLI)
      return OptimizeMemoryInst(I, SI->getOperand(1),
                                SI->getOperand(0)->getType());
    return false;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, and so must its result; since all
      // indices are zero, the GEP computes the same address, so it can be
      // replaced by a BitCast.
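      // E.g. (hypothetical IR): "%f = getelementptr {i32, float}* %s,
      // i64 0, i32 0" becomes "%f = bitcast {i32, float}* %s to i32*".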
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      GEPI->replaceAllUsesWith(NC);
      GEPI->eraseFromParent();
      ++NumGEPsElim;
      OptimizeInst(NC);
      return true;
    }
    return false;
  }

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return OptimizeCallInst(CI);

  return false;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  SunkAddrs.clear();
  bool MadeChange = false;

  CurInstIterator = BB.begin();
  for (BasicBlock::iterator E = BB.end(); CurInstIterator != E; )
    MadeChange |= OptimizeInst(CurInstIterator++);

  return MadeChange;
}
