CodeGenPrepare.cpp revision f5102a0f088e7c96f7028bf7ca1c24975c314fff
1//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass munges the code in the input function to better prepare it for
11// SelectionDAG-based code generation. This works around limitations in it's
12// basic-block-at-a-time approach. It should eventually be removed.
13//
14//===----------------------------------------------------------------------===//
15
16#define DEBUG_TYPE "codegenprepare"
17#include "llvm/Transforms/Scalar.h"
18#include "llvm/Constants.h"
19#include "llvm/DerivedTypes.h"
20#include "llvm/Function.h"
21#include "llvm/InlineAsm.h"
22#include "llvm/Instructions.h"
23#include "llvm/Pass.h"
24#include "llvm/Target/TargetAsmInfo.h"
25#include "llvm/Target/TargetData.h"
26#include "llvm/Target/TargetLowering.h"
27#include "llvm/Target/TargetMachine.h"
28#include "llvm/Transforms/Utils/BasicBlockUtils.h"
29#include "llvm/Transforms/Utils/Local.h"
30#include "llvm/ADT/DenseMap.h"
31#include "llvm/ADT/SmallSet.h"
32#include "llvm/Support/CallSite.h"
33#include "llvm/Support/Compiler.h"
34#include "llvm/Support/Debug.h"
35#include "llvm/Support/GetElementPtrTypeIterator.h"
36#include "llvm/Support/PatternMatch.h"
37using namespace llvm;
38using namespace llvm::PatternMatch;
39
40namespace {
41  class VISIBILITY_HIDDEN CodeGenPrepare : public FunctionPass {
42    /// TLI - Keep a pointer to a TargetLowering to consult for determining
43    /// transformation profitability.
44    const TargetLowering *TLI;
45  public:
46    static char ID; // Pass identification, replacement for typeid
47    explicit CodeGenPrepare(const TargetLowering *tli = 0)
48      : FunctionPass(&ID), TLI(tli) {}
49    bool runOnFunction(Function &F);
50
51  private:
52    bool EliminateMostlyEmptyBlocks(Function &F);
53    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
54    void EliminateMostlyEmptyBlock(BasicBlock *BB);
55    bool OptimizeBlock(BasicBlock &BB);
56    bool OptimizeMemoryInst(Instruction *I, Value *Addr, const Type *AccessTy,
57                            DenseMap<Value*,Value*> &SunkAddrs);
58    bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
59                               DenseMap<Value*,Value*> &SunkAddrs);
60    bool OptimizeExtUses(Instruction *I);
61  };
62}
63
64char CodeGenPrepare::ID = 0;
65static RegisterPass<CodeGenPrepare> X("codegenprepare",
66                                      "Optimize for code generation");
67
68FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
69  return new CodeGenPrepare(TLI);
70}
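
// Illustrative usage (a sketch, not part of this file): a target machine's
// pass setup would typically add this pass right before instruction
// selection, handing it the target's TargetLowering so the transformations
// below can query addressing-mode and type legality, e.g.:
//
//   PM.add(createCodeGenPreparePass(getTargetLowering()));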
71
72
73bool CodeGenPrepare::runOnFunction(Function &F) {
74  bool EverMadeChange = false;
75
76  // First pass, eliminate blocks that contain only PHI nodes and an
77  // unconditional branch.
78  EverMadeChange |= EliminateMostlyEmptyBlocks(F);
79
80  bool MadeChange = true;
81  while (MadeChange) {
82    MadeChange = false;
83    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
84      MadeChange |= OptimizeBlock(*BB);
85    EverMadeChange |= MadeChange;
86  }
87  return EverMadeChange;
88}
89
90/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes
91/// and an unconditional branch.  Passes before isel (e.g. LSR/loopsimplify)
92/// often split edges in ways that are non-optimal for isel.  Start by
93/// eliminating these blocks so we can split them the way we want them.
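///
/// As an illustrative (hypothetical) example, a block of the form
///
///   bb1:
///     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
///     br label %bb2
///   bb2:
///     %q = phi i32 [ %p, %bb1 ], [ %c, %other ]
///
/// can be removed by rewiring bb1's predecessors straight to bb2 and updating
/// %q's incoming values accordingly.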
94bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
95  bool MadeChange = false;
96  // Note that this intentionally skips the entry block.
97  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
98    BasicBlock *BB = I++;
99
100    // If this block doesn't end with an uncond branch, ignore it.
101    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
102    if (!BI || !BI->isUnconditional())
103      continue;
104
105    // If the instruction before the branch isn't a phi node, then other stuff
106    // is happening here.
107    BasicBlock::iterator BBI = BI;
108    if (BBI != BB->begin()) {
109      --BBI;
110      if (!isa<PHINode>(BBI)) continue;
111    }
112
113    // Do not break infinite loops.
114    BasicBlock *DestBB = BI->getSuccessor(0);
115    if (DestBB == BB)
116      continue;
117
118    if (!CanMergeBlocks(BB, DestBB))
119      continue;
120
121    EliminateMostlyEmptyBlock(BB);
122    MadeChange = true;
123  }
124  return MadeChange;
125}
126
127/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
128/// single uncond branch between them, and BB contains no other non-phi
129/// instructions.
130bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
131                                    const BasicBlock *DestBB) const {
132  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
133  // the successor.  If there are more complex conditions (e.g. preheaders),
134  // don't mess around with them.
135  BasicBlock::const_iterator BBI = BB->begin();
136  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
137    for (Value::use_const_iterator UI = PN->use_begin(), E = PN->use_end();
138         UI != E; ++UI) {
139      const Instruction *User = cast<Instruction>(*UI);
140      if (User->getParent() != DestBB || !isa<PHINode>(User))
141        return false;
142      // If User is inside the DestBB block and it is a PHINode, then check its
143      // incoming value. If the incoming value is not from BB, then this is a
144      // complex condition (e.g. preheaders) we want to avoid here.
145      if (User->getParent() == DestBB) {
146        if (const PHINode *UPN = dyn_cast<PHINode>(User))
147          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
148            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
149            if (Insn && Insn->getParent() == BB &&
150                Insn->getParent() != UPN->getIncomingBlock(I))
151              return false;
152          }
153      }
154    }
155  }
156
157  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
158  // and DestBB may have conflicting incoming values for the block.  If so, we
159  // can't merge the block.
160  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
161  if (!DestBBPN) return true;  // no conflict.
162
163  // Collect the preds of BB.
164  SmallPtrSet<const BasicBlock*, 16> BBPreds;
165  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
166    // It is faster to get preds from a PHI than with pred_iterator.
167    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
168      BBPreds.insert(BBPN->getIncomingBlock(i));
169  } else {
170    BBPreds.insert(pred_begin(BB), pred_end(BB));
171  }
172
173  // Walk the preds of DestBB.
174  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
175    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
176    if (BBPreds.count(Pred)) {   // Common predecessor?
177      BBI = DestBB->begin();
178      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
179        const Value *V1 = PN->getIncomingValueForBlock(Pred);
180        const Value *V2 = PN->getIncomingValueForBlock(BB);
181
182        // If V2 is a phi node in BB, look up what the mapped value will be.
183        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
184          if (V2PN->getParent() == BB)
185            V2 = V2PN->getIncomingValueForBlock(Pred);
186
187        // If there is a conflict, bail out.
188        if (V1 != V2) return false;
189      }
190    }
191  }
192
193  return true;
194}
195
196
197/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi's and
198/// an unconditional branch in it.
199void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
200  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
201  BasicBlock *DestBB = BI->getSuccessor(0);
202
203  DOUT << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB;
204
205  // If the destination block has a single pred, then this is a trivial edge,
206  // just collapse it.
207  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
208    if (SinglePred != DestBB) {
209      // Remember if SinglePred was the entry block of the function.  If so, we
210      // will need to move BB back to the entry position.
211      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
212      MergeBasicBlockIntoOnlyPred(DestBB);
213
214      if (isEntry && BB != &BB->getParent()->getEntryBlock())
215        BB->moveBefore(&BB->getParent()->getEntryBlock());
216
217      DOUT << "AFTER:\n" << *DestBB << "\n\n\n";
218      return;
219    }
220  }
221
222  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
223  // to handle the new incoming edges it is about to have.
224  PHINode *PN;
225  for (BasicBlock::iterator BBI = DestBB->begin();
226       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
227    // Remove the incoming value for BB, and remember it.
228    Value *InVal = PN->removeIncomingValue(BB, false);
229
230    // Two options: either the InVal is a phi node defined in BB or it is some
231    // value that dominates BB.
232    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
233    if (InValPhi && InValPhi->getParent() == BB) {
234      // Add all of the input values of the input PHI as inputs of this phi.
235      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
236        PN->addIncoming(InValPhi->getIncomingValue(i),
237                        InValPhi->getIncomingBlock(i));
238    } else {
239      // Otherwise, add one instance of the dominating value for each edge that
240      // we will be adding.
241      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
242        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
243          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
244      } else {
245        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
246          PN->addIncoming(InVal, *PI);
247      }
248    }
249  }
250
251  // The PHIs are now updated, change everything that refers to BB to use
252  // DestBB and remove BB.
253  BB->replaceAllUsesWith(DestBB);
254  BB->eraseFromParent();
255
256  DOUT << "AFTER:\n" << *DestBB << "\n\n\n";
257}
258
259
260/// SplitEdgeNicely - Split the critical edge from TI to its specified
261/// successor if it will improve codegen.  We only do this if the successor has
262/// phi nodes (otherwise critical edges are ok).  If there is already another
263/// predecessor of the succ that is empty (and thus has no phi nodes), use it
264/// instead of introducing a new block.
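///
/// For example (sketch): if TIBB conditionally branches to Dest, and some
/// existing block already consists of nothing but "br label %Dest" with PHI
/// values matching TIBB's, we simply retarget TIBB's edge to that block
/// instead of creating a new one.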
265static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, Pass *P) {
266  BasicBlock *TIBB = TI->getParent();
267  BasicBlock *Dest = TI->getSuccessor(SuccNum);
268  assert(isa<PHINode>(Dest->begin()) &&
269         "This should only be called if Dest has a PHI!");
270
271  // As a hack, never split backedges of loops.  Even though the copy for any
272  // PHIs inserted on the backedge would be dead for exits from the loop, we
273  // assume that the cost of *splitting* the backedge would be too high.
274  if (Dest == TIBB)
275    return;
276
277  /// TIPHIValues - This array is lazily computed to determine the values of
278  /// PHIs in Dest that TI would provide.
279  SmallVector<Value*, 32> TIPHIValues;
280
281  // Check to see if Dest has any blocks that can be used as a split edge for
282  // this terminator.
283  for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
284    BasicBlock *Pred = *PI;
285    // To be usable, the pred has to end with an uncond branch to the dest.
286    BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
287    if (!PredBr || !PredBr->isUnconditional() ||
288        // Must be empty other than the branch.
289        &Pred->front() != PredBr ||
290        // Cannot be the entry block; its label does not get emitted.
291        Pred == &(Dest->getParent()->getEntryBlock()))
292      continue;
293
294    // Finally, since we know that Dest has phi nodes in it, we have to make
295    // sure that jumping to Pred will have the same effect as going to Dest in
296    // terms of PHI values.
297    PHINode *PN;
298    unsigned PHINo = 0;
299    bool FoundMatch = true;
300    for (BasicBlock::iterator I = Dest->begin();
301         (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
302      if (PHINo == TIPHIValues.size())
303        TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));
304
305      // If the PHI entry doesn't work, we can't use this pred.
306      if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
307        FoundMatch = false;
308        break;
309      }
310    }
311
312    // If we found a workable predecessor, change TI to branch to Pred instead.
313    if (FoundMatch) {
314      Dest->removePredecessor(TIBB);
315      TI->setSuccessor(SuccNum, Pred);
316      return;
317    }
318  }
319
320  SplitCriticalEdge(TI, SuccNum, P, true);
321}
322
323/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
324/// copy (e.g. it's casting from one pointer type to another, int->uint, or
325/// int->sbyte on PPC), sink it into user blocks to reduce the number of virtual
326/// registers that must be created and coalesced.
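///
/// As a rough illustration (hypothetical IR), given
///
///   bb1:  %c = bitcast i32* %p to i8*
///   bb2:  %v = load i8* %c
///
/// a copy of the bitcast is recreated in bb2 so the use is local to the block
/// isel sees; once every use is rewritten, the original cast is erased.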
327///
328/// Return true if any changes are made.
329///
330static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
331  // If this is a noop copy,
332  MVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
333  MVT DstVT = TLI.getValueType(CI->getType());
334
335  // If this is an fp<->int conversion, it is not a noop copy.
336  if (SrcVT.isInteger() != DstVT.isInteger())
337    return false;
338
339  // If this is an extension, it will be a zero or sign extension, which
340  // isn't a noop.
341  if (SrcVT.bitsLT(DstVT)) return false;
342
343  // If these values will be promoted, find out what they will be promoted
344  // to.  This helps us consider truncates on PPC as noop copies when they
345  // are.
346  if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
347    SrcVT = TLI.getTypeToTransformTo(SrcVT);
348  if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
349    DstVT = TLI.getTypeToTransformTo(DstVT);
350
351  // If, after promotion, these are the same types, this is a noop copy.
352  if (SrcVT != DstVT)
353    return false;
354
355  BasicBlock *DefBB = CI->getParent();
356
357  /// InsertedCasts - Only insert a cast in each block once.
358  DenseMap<BasicBlock*, CastInst*> InsertedCasts;
359
360  bool MadeChange = false;
361  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
362       UI != E; ) {
363    Use &TheUse = UI.getUse();
364    Instruction *User = cast<Instruction>(*UI);
365
366    // Figure out which BB this cast is used in.  For PHI's this is the
367    // appropriate predecessor block.
368    BasicBlock *UserBB = User->getParent();
369    if (PHINode *PN = dyn_cast<PHINode>(User)) {
370      unsigned OpVal = UI.getOperandNo()/2;
371      UserBB = PN->getIncomingBlock(OpVal);
372    }
373
374    // Preincrement use iterator so we don't invalidate it.
375    ++UI;
376
377    // If this user is in the same block as the cast, don't change the cast.
378    if (UserBB == DefBB) continue;
379
380    // If we have already inserted a cast into this block, use it.
381    CastInst *&InsertedCast = InsertedCasts[UserBB];
382
383    if (!InsertedCast) {
384      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();
385
386      InsertedCast =
387        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
388                         InsertPt);
389      MadeChange = true;
390    }
391
392    // Replace a use of the cast with a use of the new cast.
393    TheUse = InsertedCast;
394  }
395
396  // If we removed all uses, nuke the cast.
397  if (CI->use_empty()) {
398    CI->eraseFromParent();
399    MadeChange = true;
400  }
401
402  return MadeChange;
403}
404
405/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
406/// the number of virtual registers that must be created and coalesced.  This is
407/// a clear win except on targets with multiple condition code registers
408///  (PowerPC), where it might lose; some adjustment may be wanted there.
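///
/// The rewrite mirrors OptimizeNoopCopyExpression above: a copy of the cmp is
/// placed in each block that uses it (PHI users are skipped), so each consumer
/// sees a locally defined compare during selection.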
409///
410/// Return true if any changes are made.
411static bool OptimizeCmpExpression(CmpInst *CI) {
412  BasicBlock *DefBB = CI->getParent();
413
414  /// InsertedCmp - Only insert a cmp in each block once.
415  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;
416
417  bool MadeChange = false;
418  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
419       UI != E; ) {
420    Use &TheUse = UI.getUse();
421    Instruction *User = cast<Instruction>(*UI);
422
423    // Preincrement use iterator so we don't invalidate it.
424    ++UI;
425
426    // Don't bother for PHI nodes.
427    if (isa<PHINode>(User))
428      continue;
429
430    // Figure out which BB this cmp is used in.
431    BasicBlock *UserBB = User->getParent();
432
433    // If this user is in the same block as the cmp, don't change the cmp.
434    if (UserBB == DefBB) continue;
435
436    // If we have already inserted a cmp into this block, use it.
437    CmpInst *&InsertedCmp = InsertedCmps[UserBB];
438
439    if (!InsertedCmp) {
440      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();
441
442      InsertedCmp =
443        CmpInst::Create(CI->getOpcode(), CI->getPredicate(), CI->getOperand(0),
444                        CI->getOperand(1), "", InsertPt);
445      MadeChange = true;
446    }
447
448    // Replace a use of the cmp with a use of the new cmp.
449    TheUse = InsertedCmp;
450  }
451
452  // If we removed all uses, nuke the cmp.
453  if (CI->use_empty())
454    CI->eraseFromParent();
455
456  return MadeChange;
457}
458
459//===----------------------------------------------------------------------===//
460// Addressing Mode Analysis and Optimization
461//===----------------------------------------------------------------------===//
462
463namespace {
464  /// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
465  /// which holds actual Value*'s for register values.
466  struct ExtAddrMode : public TargetLowering::AddrMode {
467    Value *BaseReg;
468    Value *ScaledReg;
469    ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
470    void print(OStream &OS) const;
471    void dump() const {
472      print(cerr);
473      cerr << '\n';
474    }
475  };
476} // end anonymous namespace
477
478static inline OStream &operator<<(OStream &OS, const ExtAddrMode &AM) {
479  AM.print(OS);
480  return OS;
481}
482
483void ExtAddrMode::print(OStream &OS) const {
484  bool NeedPlus = false;
485  OS << "[";
486  if (BaseGV)
487    OS << (NeedPlus ? " + " : "")
488       << "GV:%" << BaseGV->getName(), NeedPlus = true;
489
490  if (BaseOffs)
491    OS << (NeedPlus ? " + " : "") << BaseOffs, NeedPlus = true;
492
493  if (BaseReg)
494    OS << (NeedPlus ? " + " : "")
495       << "Base:%" << BaseReg->getName(), NeedPlus = true;
496  if (Scale)
497    OS << (NeedPlus ? " + " : "")
498       << Scale << "*%" << ScaledReg->getName(), NeedPlus = true;
499
500  OS << ']';
501}
502
503namespace {
504/// AddressingModeMatcher - This class exposes a single public method, which is
505/// used to construct a "maximal munch" of the addressing mode for the target
506/// specified by TLI for an access to "V" with an access type of AccessTy.  This
507/// returns the addressing mode that is actually matched by value, but also
508/// returns the list of instructions involved in that addressing computation in
509/// AddrModeInsts.
510class AddressingModeMatcher {
511  SmallVectorImpl<Instruction*> &AddrModeInsts;
512  const TargetLowering &TLI;
513
514  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
515  /// the memory instruction that we're computing this address for.
516  const Type *AccessTy;
517  Instruction *MemoryInst;
518
519  /// AddrMode - This is the addressing mode that we're building up.  This is
520  /// part of the return value of this addressing mode matching stuff.
521  ExtAddrMode &AddrMode;
522
523  /// IgnoreProfitability - This is set to true when we should not do
524  /// profitability checks.  When true, IsProfitableToFoldIntoAddressingMode
525  /// always returns true.
526  bool IgnoreProfitability;
527
528  AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI,
529                        const TargetLowering &T, const Type *AT,
530                        Instruction *MI, ExtAddrMode &AM)
531    : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM) {
532    IgnoreProfitability = false;
533  }
534public:
535
536  /// Match - Find the maximal addressing mode that a load/store of V can fold,
537  /// given an access type of AccessTy.  This returns a list of involved
538  /// instructions in AddrModeInsts.
539  static ExtAddrMode Match(Value *V, const Type *AccessTy,
540                           Instruction *MemoryInst,
541                           SmallVectorImpl<Instruction*> &AddrModeInsts,
542                           const TargetLowering &TLI) {
543    ExtAddrMode Result;
544
545    bool Success =
546      AddressingModeMatcher(AddrModeInsts, TLI, AccessTy,
547                            MemoryInst, Result).MatchAddr(V, 0);
548    Success = Success; assert(Success && "Couldn't select *anything*?");
549    return Result;
550  }
551private:
552  bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
553  bool MatchAddr(Value *V, unsigned Depth);
554  bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth);
555  bool IsProfitableToFoldIntoAddressingMode(Instruction *I,
556                                            ExtAddrMode &AMBefore,
557                                            ExtAddrMode &AMAfter);
558  bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
559};
560} // end anonymous namespace
561
562/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
563/// Return true and update AddrMode if this addr mode is legal for the target,
564/// false if not.
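///
/// For example (illustrative only), matching "%x * 4" where %x = add %y, 3
/// can become { ScaledReg = %y, Scale = 4, BaseOffs += 12 }, provided the
/// target reports the resulting addressing mode as legal.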
565bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
566                                             unsigned Depth) {
567  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
568  // mode.  Just process that directly.
569  if (Scale == 1)
570    return MatchAddr(ScaleReg, Depth);
571
572  // If the scale is 0, it takes nothing to add this.
573  if (Scale == 0)
574    return true;
575
576  // If we already have a scale of this value, we can add to it, otherwise, we
577  // need an available scale field.
578  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
579    return false;
580
581  ExtAddrMode TestAddrMode = AddrMode;
582
583  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
584  // [A+B + A*7] -> [B+A*8].
585  TestAddrMode.Scale += Scale;
586  TestAddrMode.ScaledReg = ScaleReg;
587
588  // If the new address isn't legal, bail out.
589  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
590    return false;
591
592  // It was legal, so commit it.
593  AddrMode = TestAddrMode;
594
595  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
596  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
597  // X*Scale + C*Scale to addr mode.
598  ConstantInt *CI; Value *AddLHS;
599  if (match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
600    TestAddrMode.ScaledReg = AddLHS;
601    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;
602
603    // If this addressing mode is legal, commit it and remember that we folded
604    // this instruction.
605    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
606      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
607      AddrMode = TestAddrMode;
608      return true;
609    }
610  }
611
612  // Otherwise, not (x+c)*scale, just return what we have.
613  return true;
614}
615
616/// MightBeFoldableInst - This is a little filter, which returns true if an
617/// addressing computation involving I might be folded into a load/store
618/// accessing it.  This doesn't need to be perfect, but needs to accept at least
619/// the set of instructions that MatchOperationAddr can.
620static bool MightBeFoldableInst(Instruction *I) {
621  switch (I->getOpcode()) {
622  case Instruction::BitCast:
623    // Don't touch identity bitcasts.
624    if (I->getType() == I->getOperand(0)->getType())
625      return false;
626    return isa<PointerType>(I->getType()) || isa<IntegerType>(I->getType());
627  case Instruction::PtrToInt:
628    // PtrToInt is always a noop, as we know that the int type is pointer sized.
629    return true;
630  case Instruction::IntToPtr:
631    // We know the input is intptr_t, so this is foldable.
632    return true;
633  case Instruction::Add:
634    return true;
635  case Instruction::Mul:
636  case Instruction::Shl:
637    // Can only handle X*C and X << C.
638    return isa<ConstantInt>(I->getOperand(1));
639  case Instruction::GetElementPtr:
640    return true;
641  default:
642    return false;
643  }
644}
645
646
647/// MatchOperationAddr - Given an instruction or constant expr, see if we can
648/// fold the operation into the addressing mode.  If so, update the addressing
649/// mode and return true, otherwise return false without modifying AddrMode.
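///
/// For instance (a sketch with hypothetical IR), a GEP such as
///   %a = getelementptr { i32, i32 }* %p, i32 0, i32 1
/// folds to { BaseReg = %p, BaseOffs += 4 } under a typical 32-bit layout,
/// while a GEP with a single non-constant index contributes a scaled register.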
650bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
651                                               unsigned Depth) {
652  // Avoid exponential behavior on extremely deep expression trees.
653  if (Depth >= 5) return false;
654
655  switch (Opcode) {
656  case Instruction::PtrToInt:
657    // PtrToInt is always a noop, as we know that the int type is pointer sized.
658    return MatchAddr(AddrInst->getOperand(0), Depth);
659  case Instruction::IntToPtr:
660    // This inttoptr is a no-op if the integer type is pointer sized.
661    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
662        TLI.getPointerTy())
663      return MatchAddr(AddrInst->getOperand(0), Depth);
664    return false;
665  case Instruction::BitCast:
666    // BitCast is always a noop, and we can handle it as long as it is
667    // int->int or pointer->pointer (we don't want int<->fp or something).
668    if ((isa<PointerType>(AddrInst->getOperand(0)->getType()) ||
669         isa<IntegerType>(AddrInst->getOperand(0)->getType())) &&
670        // Don't touch identity bitcasts.  These were probably put here by LSR,
671        // and we don't want to mess around with them.  Assume it knows what it
672        // is doing.
673        AddrInst->getOperand(0)->getType() != AddrInst->getType())
674      return MatchAddr(AddrInst->getOperand(0), Depth);
675    return false;
676  case Instruction::Add: {
677    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
678    ExtAddrMode BackupAddrMode = AddrMode;
679    unsigned OldSize = AddrModeInsts.size();
680    if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
681        MatchAddr(AddrInst->getOperand(0), Depth+1))
682      return true;
683
684    // Restore the old addr mode info.
685    AddrMode = BackupAddrMode;
686    AddrModeInsts.resize(OldSize);
687
688    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
689    if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
690        MatchAddr(AddrInst->getOperand(1), Depth+1))
691      return true;
692
693    // Otherwise we definitely can't merge the ADD in.
694    AddrMode = BackupAddrMode;
695    AddrModeInsts.resize(OldSize);
696    break;
697  }
698  //case Instruction::Or:
699  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
700  //break;
701  case Instruction::Mul:
702  case Instruction::Shl: {
703    // Can only handle X*C and X << C.
704    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
705    if (!RHS) return false;
706    int64_t Scale = RHS->getSExtValue();
707    if (Opcode == Instruction::Shl)
708      Scale = 1 << Scale;
709
710    return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
711  }
712  case Instruction::GetElementPtr: {
713    // Scan the GEP.  We can handle it if it contains constant offsets and at
714    // most one variable offset.
715    int VariableOperand = -1;
716    unsigned VariableScale = 0;
717
718    int64_t ConstantOffset = 0;
719    const TargetData *TD = TLI.getTargetData();
720    gep_type_iterator GTI = gep_type_begin(AddrInst);
721    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
722      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
723        const StructLayout *SL = TD->getStructLayout(STy);
724        unsigned Idx =
725          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
726        ConstantOffset += SL->getElementOffset(Idx);
727      } else {
728        uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType());
729        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
730          ConstantOffset += CI->getSExtValue()*TypeSize;
731        } else if (TypeSize) {  // Scales of zero don't do anything.
732          // We only allow one variable index at the moment.
733          if (VariableOperand != -1)
734            return false;
735
736          // Remember the variable index.
737          VariableOperand = i;
738          VariableScale = TypeSize;
739        }
740      }
741    }
742
743    // A common case is for the GEP to only do a constant offset.  In this case,
744    // just add it to the disp field and check validity.
745    if (VariableOperand == -1) {
746      AddrMode.BaseOffs += ConstantOffset;
747      if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){
748        // Check to see if we can fold the base pointer in too.
749        if (MatchAddr(AddrInst->getOperand(0), Depth+1))
750          return true;
751      }
752      AddrMode.BaseOffs -= ConstantOffset;
753      return false;
754    }
755
756    // Save the valid addressing mode in case we can't match.
757    ExtAddrMode BackupAddrMode = AddrMode;
758
759    // Check that this has no base reg yet.  If one is already set, we won't
760    // have a place to put the base of the GEP (assuming it is not a null ptr).
761    bool SetBaseReg = true;
762    if (isa<ConstantPointerNull>(AddrInst->getOperand(0)))
763      SetBaseReg = false;   // null pointer base doesn't need representation.
764    else if (AddrMode.HasBaseReg)
765      return false;  // Base register already specified, can't match GEP.
766    else {
767      // Otherwise, we'll use the GEP base as the BaseReg.
768      AddrMode.HasBaseReg = true;
769      AddrMode.BaseReg = AddrInst->getOperand(0);
770    }
771
772    // See if the scale and offset amount is valid for this target.
773    AddrMode.BaseOffs += ConstantOffset;
774
775    if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
776                          Depth)) {
777      AddrMode = BackupAddrMode;
778      return false;
779    }
780
781    // If we have a null as the base of the GEP, folding in the constant offset
782    // plus variable scale is all we can do.
783    if (!SetBaseReg) return true;
784
785    // If this match succeeded, we know that we can form an address with the
786    // GepBase as the basereg.  Match the base pointer of the GEP more
787    // aggressively by zeroing out BaseReg and rematching.  If the base is
788    // (for example) another GEP, this allows merging in that other GEP into
789    // the addressing mode we're forming.
790    AddrMode.HasBaseReg = false;
791    AddrMode.BaseReg = 0;
792    bool Success = MatchAddr(AddrInst->getOperand(0), Depth+1);
793    assert(Success && "MatchAddr should be able to fill in BaseReg!");
794    Success=Success;
795    return true;
796  }
797  }
798  return false;
799}
800
801/// MatchAddr - If we can, try to add the value of 'Addr' into the current
802/// addressing mode.  If Addr can't be added to AddrMode this returns false and
803/// leaves AddrMode unmodified.  This assumes that Addr is either a pointer type
804/// or intptr_t for the target.
805///
806bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
807  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
808    // Fold in immediates if legal for the target.
809    AddrMode.BaseOffs += CI->getSExtValue();
810    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
811      return true;
812    AddrMode.BaseOffs -= CI->getSExtValue();
813  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
814    // If this is a global variable, try to fold it into the addressing mode.
815    if (AddrMode.BaseGV == 0) {
816      AddrMode.BaseGV = GV;
817      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
818        return true;
819      AddrMode.BaseGV = 0;
820    }
821  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
822    ExtAddrMode BackupAddrMode = AddrMode;
823    unsigned OldSize = AddrModeInsts.size();
824
825    // Check to see if it is possible to fold this operation.
826    if (MatchOperationAddr(I, I->getOpcode(), Depth)) {
827      // Okay, it's possible to fold this.  Check to see if it is actually
828      // *profitable* to do so.  We use a simple cost model to avoid increasing
829      // register pressure too much.
830      if (I->hasOneUse() ||
831          IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
832        AddrModeInsts.push_back(I);
833        return true;
834      }
835
836      // It isn't profitable to do this, roll back.
837      //cerr << "NOT FOLDING: " << *I;
838      AddrMode = BackupAddrMode;
839      AddrModeInsts.resize(OldSize);
840    }
841  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
842    if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
843      return true;
844  } else if (isa<ConstantPointerNull>(Addr)) {
845    // Null pointer gets folded without affecting the addressing mode.
846    return true;
847  }
848
849  // Worst case, the target should support [reg] addressing modes. :)
850  if (!AddrMode.HasBaseReg) {
851    AddrMode.HasBaseReg = true;
852    AddrMode.BaseReg = Addr;
853    // Still check for legality in case the target supports [imm] but not [i+r].
854    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
855      return true;
856    AddrMode.HasBaseReg = false;
857    AddrMode.BaseReg = 0;
858  }
859
860  // If the base register is already taken, see if we can do [r+r].
861  if (AddrMode.Scale == 0) {
862    AddrMode.Scale = 1;
863    AddrMode.ScaledReg = Addr;
864    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
865      return true;
866    AddrMode.Scale = 0;
867    AddrMode.ScaledReg = 0;
868  }
869  // Couldn't match.
870  return false;
871}
872
873
874/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
875/// inline asm call are due to memory operands.  If so, return true, otherwise
876/// return false.
877static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
878                                    const TargetLowering &TLI) {
879  std::vector<InlineAsm::ConstraintInfo>
880  Constraints = IA->ParseConstraints();
881
882  unsigned ArgNo = 1;   // ArgNo - The operand of the CallInst.
883  for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
884    TargetLowering::AsmOperandInfo OpInfo(Constraints[i]);
885
886    // Compute the value type for each operand.
887    switch (OpInfo.Type) {
888      case InlineAsm::isOutput:
889        if (OpInfo.isIndirect)
890          OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
891        break;
892      case InlineAsm::isInput:
893        OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
894        break;
895      case InlineAsm::isClobber:
896        // Nothing to do.
897        break;
898    }
899
900    // Compute the constraint code and ConstraintType to use.
901    TLI.ComputeConstraintToUse(OpInfo, SDValue(),
902                             OpInfo.ConstraintType == TargetLowering::C_Memory);
903
904    // If this asm operand is our Value*, and if it isn't an indirect memory
905    // operand, we can't fold it!
906    if (OpInfo.CallOperandVal == OpVal &&
907        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
908         !OpInfo.isIndirect))
909      return false;
910  }
911
912  return true;
913}
914
915
916/// FindAllMemoryUses - Recursively walk all the uses of I until we find a
917/// memory use.  If we find an obviously non-foldable instruction, return true.
918/// Add the ultimately found memory instructions to MemoryUses.
919static bool FindAllMemoryUses(Instruction *I,
920                SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
921                              SmallPtrSet<Instruction*, 16> &ConsideredInsts,
922                              const TargetLowering &TLI) {
923  // If we already considered this instruction, we're done.
924  if (!ConsideredInsts.insert(I))
925    return false;
926
927  // If this is an obviously unfoldable instruction, bail out.
928  if (!MightBeFoldableInst(I))
929    return true;
930
931  // Loop over all the uses, recursively processing them.
932  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
933       UI != E; ++UI) {
934    if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
935      MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
936      continue;
937    }
938
939    if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
940      if (UI.getOperandNo() == 0) return true; // Storing addr, not into addr.
941      MemoryUses.push_back(std::make_pair(SI, UI.getOperandNo()));
942      continue;
943    }
944
945    if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
946      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
947      if (IA == 0) return true;
948
949      // If this is a memory operand, we're cool, otherwise bail out.
950      if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
951        return true;
952      continue;
953    }
954
955    if (FindAllMemoryUses(cast<Instruction>(*UI), MemoryUses, ConsideredInsts,
956                          TLI))
957      return true;
958  }
959
960  return false;
961}
962
963
964/// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
965/// the use site that we're folding it into.  If so, there is no cost to
966/// include it in the addressing mode.  KnownLive1 and KnownLive2 are two values
967/// that we know are live at the instruction already.
968bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
969                                                   Value *KnownLive2) {
970  // If Val is either of the known-live values, we know it is live!
971  if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
972    return true;
973
974  // All values other than instructions and arguments (e.g. constants) are live.
975  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
976
977  // If Val is a constant-sized alloca in the entry block, it is live; this is
978  // true because it is just a reference to the stack/frame pointer, which is
979  // live for the whole function.
980  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
981    if (AI->isStaticAlloca())
982      return true;
983
984  // Check to see if this value is already used in the memory instruction's
985  // block.  If so, it's already live into the block at the very least, so we
986  // can reasonably fold it.
987  BasicBlock *MemBB = MemoryInst->getParent();
988  for (Value::use_iterator UI = Val->use_begin(), E = Val->use_end();
989       UI != E; ++UI)
990    // We know that uses of arguments and instructions have to be instructions.
991    if (cast<Instruction>(*UI)->getParent() == MemBB)
992      return true;
993
994  return false;
995}
996
997
998
999/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
1000/// mode of the machine to fold the specified instruction into a load or store
1001/// that ultimately uses it.  However, the specified instruction has multiple
1002/// uses.  Given this, it may actually increase register pressure to fold it
1003/// into the load.  For example, consider this code:
1004///
1005///     X = ...
1006///     Y = X+1
1007///     use(Y)   -> nonload/store
1008///     Z = Y+1
1009///     load Z
1010///
1011/// In this case, Y has multiple uses, and can be folded into the load of Z
1012/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
1013/// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
1014/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
1015/// number of computations either.
1016///
1017/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
1018/// X was live across 'load Z' for other reasons, we actually *would* want to
1019/// fold the addressing mode in the Z case.  This would make Y die earlier.
1020bool AddressingModeMatcher::
1021IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
1022                                     ExtAddrMode &AMAfter) {
1023  if (IgnoreProfitability) return true;
1024
1025  // AMBefore is the addressing mode before this instruction was folded into it,
1026  // and AMAfter is the addressing mode after the instruction was folded.  Get
1027  // the set of registers referenced by AMAfter and subtract out those
1028  // referenced by AMBefore: this is the set of values which folding in this
1029  // address extends the lifetime of.
1030  //
1031  // Note that there are only two potential values being referenced here,
1032  // BaseReg and ScaleReg (global addresses are always available, as are any
1033  // folded immediates).
1034  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
1035
1036  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
1037  // lifetime wasn't extended by adding this instruction.
1038  if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
1039    BaseReg = 0;
1040  if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
1041    ScaledReg = 0;
1042
1043  // If folding this instruction (and its subexprs) didn't extend any live
1044  // ranges, we're ok with it.
1045  if (BaseReg == 0 && ScaledReg == 0)
1046    return true;
1047
1048  // If all uses of this instruction are ultimately load/store/inlineasm's,
1049  // check to see if their addressing modes will include this instruction.  If
1050  // so, we can fold it into all uses, so it doesn't matter if it has multiple
1051  // uses.
1052  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
1053  SmallPtrSet<Instruction*, 16> ConsideredInsts;
1054  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
1055    return false;  // Has a non-memory, non-foldable use!
1056
1057  // Now that we know that all uses of this instruction are part of a chain of
1058  // computation involving only operations that could theoretically be folded
1059  // into a memory use, loop over each of these uses and see if they could
1060  // *actually* fold the instruction.
1061  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
1062  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
1063    Instruction *User = MemoryUses[i].first;
1064    unsigned OpNo = MemoryUses[i].second;
1065
1066    // Get the access type of this use.  If the use isn't a pointer, we don't
1067    // know what it accesses.
1068    Value *Address = User->getOperand(OpNo);
1069    if (!isa<PointerType>(Address->getType()))
1070      return false;
1071    const Type *AddressAccessTy =
1072      cast<PointerType>(Address->getType())->getElementType();
1073
1074    // Do a match against the root of this address, ignoring profitability. This
1075    // will tell us if the addressing mode for the memory operation will
1076    // *actually* cover the shared instruction.
1077    ExtAddrMode Result;
1078    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
1079                                  MemoryInst, Result);
1080    Matcher.IgnoreProfitability = true;
1081    bool Success = Matcher.MatchAddr(Address, 0);
1082    Success = Success; assert(Success && "Couldn't select *anything*?");
1083
1084    // If the match didn't cover I, then it won't be shared by it.
1085    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
1086                  I) == MatchedAddrModeInsts.end())
1087      return false;
1088
1089    MatchedAddrModeInsts.clear();
1090  }
1091
1092  return true;
1093}
1094
1095
1096//===----------------------------------------------------------------------===//
1097// Memory Optimization
1098//===----------------------------------------------------------------------===//
1099
1100/// IsNonLocalValue - Return true if the specified value is defined in a
1101/// different basic block than BB.
1102static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
1103  if (Instruction *I = dyn_cast<Instruction>(V))
1104    return I->getParent() != BB;
1105  return false;
1106}
1107
1108/// OptimizeMemoryInst - Load and store instructions often have
1109/// addressing modes that can do significant amounts of computation.  As such,
1110/// instruction selection will try to get the load or store to do as much
1111/// computation as possible for the program.  The problem is that isel can only
1112/// see within a single block.  As such, we sink as much legal addressing mode
1113/// stuff into the block as possible.
1114///
1115/// This method is used to optimize both load/store and inline asms with memory
1116/// operands.
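///
/// Roughly (illustrative), if a load in this block uses an address such as
/// "%base + 4*%idx + 8" whose computation lives in another block, the matched
/// pieces are rebuilt here as "sunkaddr" instructions so isel can fold the
/// whole expression into the load's addressing mode.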
1117bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
1118                                        const Type *AccessTy,
1119                                        DenseMap<Value*,Value*> &SunkAddrs) {
1120  // Figure out what addressing mode will be built up for this operation.
1121  SmallVector<Instruction*, 16> AddrModeInsts;
1122  ExtAddrMode AddrMode = AddressingModeMatcher::Match(Addr, AccessTy,MemoryInst,
1123                                                      AddrModeInsts, *TLI);
1124
1125  // Check to see if any of the instructions subsumed by this addr mode are
1126  // non-local to I's BB.
1127  bool AnyNonLocal = false;
1128  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
1129    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
1130      AnyNonLocal = true;
1131      break;
1132    }
1133  }
1134
1135  // If all the instructions matched are already in this BB, don't do anything.
1136  if (!AnyNonLocal) {
1137    DEBUG(cerr << "CGP: Found      local addrmode: " << AddrMode << "\n");
1138    return false;
1139  }
1140
1141  // Insert this computation right after this user.  Since our caller is
1142  // scanning from the top of the BB to the bottom, any reuses of the expr are
1143  // guaranteed to happen later.
1144  BasicBlock::iterator InsertPt = MemoryInst;
1145
1146  // Now that we've determined the addressing expression we want to use and
1147  // know that we have to sink it into this block, check to see if we have
1148  // already done this for some other load/store instr in this block.  If so,
1149  // reuse the computation.
1150  Value *&SunkAddr = SunkAddrs[Addr];
1151  if (SunkAddr) {
1152    DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << "\n");
1153    if (SunkAddr->getType() != Addr->getType())
1154      SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
1155  } else {
1156    DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << "\n");
1157    const Type *IntPtrTy = TLI->getTargetData()->getIntPtrType();
1158
1159    Value *Result = 0;
1160    // Start with the scale value.
1161    if (AddrMode.Scale) {
1162      Value *V = AddrMode.ScaledReg;
1163      if (V->getType() == IntPtrTy) {
1164        // done.
1165      } else if (isa<PointerType>(V->getType())) {
1166        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
1167      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
1168                 cast<IntegerType>(V->getType())->getBitWidth()) {
1169        V = new TruncInst(V, IntPtrTy, "sunkaddr", InsertPt);
1170      } else {
1171        V = new SExtInst(V, IntPtrTy, "sunkaddr", InsertPt);
1172      }
1173      if (AddrMode.Scale != 1)
1174        V = BinaryOperator::CreateMul(V, ConstantInt::get(IntPtrTy,
1175                                                          AddrMode.Scale),
1176                                      "sunkaddr", InsertPt);
1177      Result = V;
1178    }
1179
1180    // Add in the base register.
1181    if (AddrMode.BaseReg) {
1182      Value *V = AddrMode.BaseReg;
1183      if (V->getType() != IntPtrTy)
1184        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
1185      if (Result)
1186        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
1187      else
1188        Result = V;
1189    }
1190
1191    // Add in the BaseGV if present.
1192    if (AddrMode.BaseGV) {
1193      Value *V = new PtrToIntInst(AddrMode.BaseGV, IntPtrTy, "sunkaddr",
1194                                  InsertPt);
1195      if (Result)
1196        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
1197      else
1198        Result = V;
1199    }
1200
1201    // Add in the Base Offset if present.
1202    if (AddrMode.BaseOffs) {
1203      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
1204      if (Result)
1205        Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
1206      else
1207        Result = V;
1208    }
1209
1210    if (Result == 0)
1211      SunkAddr = Constant::getNullValue(Addr->getType());
1212    else
1213      SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",InsertPt);
1214  }
1215
1216  MemoryInst->replaceUsesOfWith(Addr, SunkAddr);
1217
1218  if (Addr->use_empty())
1219    RecursivelyDeleteTriviallyDeadInstructions(Addr);
1220  return true;
1221}
1222
1223/// OptimizeInlineAsmInst - If there are any memory operands, use
1224/// OptimizeMemoryInst to sink their address computing into the block when
1225/// possible / profitable.
1226bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
1227                                           DenseMap<Value*,Value*> &SunkAddrs) {
1228  bool MadeChange = false;
1229  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
1230
1231  // Do a prepass over the constraints, canonicalizing them, and building up the
1232  // ConstraintOperands list.
1233  std::vector<InlineAsm::ConstraintInfo>
1234    ConstraintInfos = IA->ParseConstraints();
1235
1236  /// ConstraintOperands - Information about all of the constraints.
1237  std::vector<TargetLowering::AsmOperandInfo> ConstraintOperands;
1238  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
1239  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
1240    ConstraintOperands.
1241      push_back(TargetLowering::AsmOperandInfo(ConstraintInfos[i]));
1242    TargetLowering::AsmOperandInfo &OpInfo = ConstraintOperands.back();
1243
1244    // Compute the value type for each operand.
1245    switch (OpInfo.Type) {
1246    case InlineAsm::isOutput:
1247      if (OpInfo.isIndirect)
1248        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
1249      break;
1250    case InlineAsm::isInput:
1251      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
1252      break;
1253    case InlineAsm::isClobber:
1254      // Nothing to do.
1255      break;
1256    }
1257
1258    // Compute the constraint code and ConstraintType to use.
1259    TLI->ComputeConstraintToUse(OpInfo, SDValue(),
1260                             OpInfo.ConstraintType == TargetLowering::C_Memory);
1261
1262    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
1263        OpInfo.isIndirect) {
1264      Value *OpVal = OpInfo.CallOperandVal;
1265      MadeChange |= OptimizeMemoryInst(I, OpVal, OpVal->getType(), SunkAddrs);
1266    }
1267  }
1268
1269  return MadeChange;
1270}
1271
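/// OptimizeExtUses - If the result of a {s|z}ext and its source value are both
/// live outside the defining block, rewrite the source's out-of-block uses to
/// use a free truncate of the extension instead, so only the extended value
/// has to stay live across blocks.  For example (sketch):
///   %x = load i32* %p
///   %e = zext i32 %x to i64    ; %x and %e both used in another block
/// out-of-block uses of %x become "trunc i64 %e to i32" in the user block.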
1272bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
1273  BasicBlock *DefBB = I->getParent();
1274
1275  // If both the result of the {s|z}xt and its source are live out, rewrite all
1276  // other uses of the source with the result of the extension.
1277  Value *Src = I->getOperand(0);
1278  if (Src->hasOneUse())
1279    return false;
1280
1281  // Only do this xform if truncating is free.
1282  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
1283    return false;
1284
1285  // Only safe to perform the optimization if the source is also defined in
1286  // this block.
1287  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
1288    return false;
1289
1290  bool DefIsLiveOut = false;
1291  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
1292       UI != E; ++UI) {
1293    Instruction *User = cast<Instruction>(*UI);
1294
1295    // Figure out which BB this ext is used in.
1296    BasicBlock *UserBB = User->getParent();
1297    if (UserBB == DefBB) continue;
1298    DefIsLiveOut = true;
1299    break;
1300  }
1301  if (!DefIsLiveOut)
1302    return false;
1303
1304  // Make sure none of the uses are PHI nodes.
1305  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
1306       UI != E; ++UI) {
1307    Instruction *User = cast<Instruction>(*UI);
1308    BasicBlock *UserBB = User->getParent();
1309    if (UserBB == DefBB) continue;
1310    // Be conservative. We don't want this xform to end up introducing
1311    // reloads just before load / store instructions.
1312    if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
1313      return false;
1314  }
1315
1316  // InsertedTruncs - Only insert one trunc in each block.
1317  DenseMap<BasicBlock*, Instruction*> InsertedTruncs;
1318
1319  bool MadeChange = false;
1320  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
1321       UI != E; ++UI) {
1322    Use &TheUse = UI.getUse();
1323    Instruction *User = cast<Instruction>(*UI);
1324
1325    // Figure out which BB this ext is used in.
1326    BasicBlock *UserBB = User->getParent();
1327    if (UserBB == DefBB) continue;
1328
1329    // Both src and def are live in this block. Rewrite the use.
1330    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
1331
1332    if (!InsertedTrunc) {
1333      BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();
1334
1335      InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
1336    }
1337
1338    // Replace a use of the {s|z}ext source with a use of the result.
1339    TheUse = InsertedTrunc;
1340
1341    MadeChange = true;
1342  }
1343
1344  return MadeChange;
1345}
1346
1347// In this pass we look for GEP and cast instructions that are used
1348// across basic blocks and rewrite them to improve basic-block-at-a-time
1349// selection.
1350bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
1351  bool MadeChange = false;
1352
1353  // Split all critical edges where the dest block has a PHI and where the phi
1354  // has shared immediate operands.
1355  TerminatorInst *BBTI = BB.getTerminator();
1356  if (BBTI->getNumSuccessors() > 1) {
1357    for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i)
1358      if (isa<PHINode>(BBTI->getSuccessor(i)->begin()) &&
1359          isCriticalEdge(BBTI, i, true))
1360        SplitEdgeNicely(BBTI, i, this);
1361  }
1362
1363
1364  // Keep track of non-local addresses that have been sunk into this block.
1365  // This allows us to avoid inserting duplicate code for blocks with multiple
1366  // load/stores of the same address.
1367  DenseMap<Value*, Value*> SunkAddrs;
1368
1369  for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
1370    Instruction *I = BBI++;
1371
1372    if (CastInst *CI = dyn_cast<CastInst>(I)) {
1373      // If the source of the cast is a constant, then this should have
1374      // already been constant folded.  The only reason NOT to constant fold
1375      // it is if something (e.g. LSR) was careful to place the constant
1376      // evaluation in a block other than the one that uses it (e.g. to hoist
1377      // the address of globals out of a loop).  If this is the case, we don't
1378      // want to forward-subst the cast.
1379      if (isa<Constant>(CI->getOperand(0)))
1380        continue;
1381
1382      bool Change = false;
1383      if (TLI) {
1384        Change = OptimizeNoopCopyExpression(CI, *TLI);
1385        MadeChange |= Change;
1386      }
1387
1388      if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I)))
1389        MadeChange |= OptimizeExtUses(I);
1390    } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
1391      MadeChange |= OptimizeCmpExpression(CI);
1392    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
1393      if (TLI)
1394        MadeChange |= OptimizeMemoryInst(I, I->getOperand(0), LI->getType(),
1395                                         SunkAddrs);
1396    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
1397      if (TLI)
1398        MadeChange |= OptimizeMemoryInst(I, SI->getOperand(1),
1399                                         SI->getOperand(0)->getType(),
1400                                         SunkAddrs);
1401    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
1402      if (GEPI->hasAllZeroIndices()) {
1403        // The GEP operand must be a pointer, so must its result -> BitCast
1404        Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
1405                                          GEPI->getName(), GEPI);
1406        GEPI->replaceAllUsesWith(NC);
1407        GEPI->eraseFromParent();
1408        MadeChange = true;
1409        BBI = NC;
1410      }
1411    } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
1412      // If we found an inline asm expression, and if the target knows how to
1413      // lower it to normal LLVM code, do so now.
1414      if (TLI && isa<InlineAsm>(CI->getCalledValue()))
1415        if (const TargetAsmInfo *TAI =
1416            TLI->getTargetMachine().getTargetAsmInfo()) {
1417          if (TAI->ExpandInlineAsm(CI))
1418            BBI = BB.begin();
1419          else
1420            // Sink address computing for memory operands into the block.
1421            MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
1422        }
1423    }
1424  }
1425
1426  return MadeChange;
1427}
1428