LoopStrengthReduce.cpp revision e50ed30282bb5b4a9ed952580523f2dda16215ac
1//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This transformation analyzes and transforms the induction variables (and
11// computations derived from them) into forms suitable for efficient execution
12// on the target.
13//
14// This pass performs a strength reduction on array references inside loops
15// that have the loop induction variable as one or more of their components;
16// it rewrites expressions to take advantage of scaled-index addressing modes
17// available on the target, and it performs a variety of other optimizations
18// related to loop induction variables.
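//
// For example, an address that would otherwise be recomputed every iteration
// as "Base + i*S" (with i the canonical induction variable) can typically be
// rewritten to use a pointer that starts at "Base" and is advanced by "S" each
// time around the loop, replacing the per-iteration multiply with an add.
// (This is only a rough illustration; the exact form chosen depends on the
// target's addressing modes.)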
19//
20//===----------------------------------------------------------------------===//
21
22#define DEBUG_TYPE "loop-reduce"
23#include "llvm/Transforms/Scalar.h"
24#include "llvm/Constants.h"
25#include "llvm/Instructions.h"
26#include "llvm/IntrinsicInst.h"
27#include "llvm/LLVMContext.h"
28#include "llvm/Type.h"
29#include "llvm/DerivedTypes.h"
30#include "llvm/Analysis/Dominators.h"
31#include "llvm/Analysis/IVUsers.h"
32#include "llvm/Analysis/LoopInfo.h"
33#include "llvm/Analysis/LoopPass.h"
34#include "llvm/Analysis/ScalarEvolutionExpander.h"
35#include "llvm/Transforms/Utils/AddrModeMatcher.h"
36#include "llvm/Transforms/Utils/BasicBlockUtils.h"
37#include "llvm/Transforms/Utils/Local.h"
38#include "llvm/ADT/Statistic.h"
39#include "llvm/Support/CFG.h"
40#include "llvm/Support/Debug.h"
41#include "llvm/Support/Compiler.h"
42#include "llvm/Support/CommandLine.h"
43#include "llvm/Support/ValueHandle.h"
44#include "llvm/Support/raw_ostream.h"
45#include "llvm/Target/TargetLowering.h"
46#include <algorithm>
47using namespace llvm;
48
49STATISTIC(NumReduced ,    "Number of IV uses strength reduced");
50STATISTIC(NumInserted,    "Number of PHIs inserted");
51STATISTIC(NumVariable,    "Number of PHIs with variable strides");
52STATISTIC(NumEliminated,  "Number of strides eliminated");
53STATISTIC(NumShadow,      "Number of Shadow IVs optimized");
54STATISTIC(NumImmSunk,     "Number of common expr immediates sunk into uses");
55STATISTIC(NumLoopCond,    "Number of loop terminating conds optimized");
56
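// EnableFullLSRMode - Hidden option: when set, addresses may be strength-
// reduced all the way down to pointer arithmetic, even at the cost of extra
// induction variables (see ShouldUseFullStrengthReductionMode below).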
57static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
58                                       cl::init(false),
59                                       cl::Hidden);
60
61namespace {
62
63  struct BasedUser;
64
65  /// IVExpr - This structure keeps track of one IV expression inserted during
66  /// StrengthReduceStridedIVUsers. It contains the stride, the common base,
67  /// and the PHI node created for the rewrite.
68  struct VISIBILITY_HIDDEN IVExpr {
69    const SCEV *Stride;
70    const SCEV *Base;
71    PHINode    *PHI;
72
73    IVExpr(const SCEV *const stride, const SCEV *const base, PHINode *phi)
74      : Stride(stride), Base(base), PHI(phi) {}
75  };
76
77  /// IVsOfOneStride - This structure keeps track of all IV expressions inserted
78  /// during StrengthReduceStridedIVUsers for a particular stride of the IV.
79  struct VISIBILITY_HIDDEN IVsOfOneStride {
80    std::vector<IVExpr> IVs;
81
82    void addIV(const SCEV *const Stride, const SCEV *const Base, PHINode *PHI) {
83      IVs.push_back(IVExpr(Stride, Base, PHI));
84    }
85  };
86
87  class VISIBILITY_HIDDEN LoopStrengthReduce : public LoopPass {
88    IVUsers *IU;
89    LoopInfo *LI;
90    DominatorTree *DT;
91    ScalarEvolution *SE;
92    bool Changed;
93
94    /// IVsByStride - Keep track of all IVs that have been inserted for a
95    /// particular stride.
96    std::map<const SCEV *, IVsOfOneStride> IVsByStride;
97
98    /// StrideNoReuse - Keep track of all the strides whose IVs cannot be
99    /// reused (nor should they be rewritten to reuse other strides).
100    SmallSet<const SCEV *, 4> StrideNoReuse;
101
102    /// DeadInsts - Keep track of instructions we may have made dead, so that
103    /// we can remove them after we are done working.
104    SmallVector<WeakVH, 16> DeadInsts;
105
106    /// TLI - Keep a pointer to a TargetLowering to consult for determining
107    /// transformation profitability.
108    const TargetLowering *TLI;
109
110  public:
111    static char ID; // Pass ID, replacement for typeid
112    explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
113      LoopPass(&ID), TLI(tli) {
114    }
115
116    bool runOnLoop(Loop *L, LPPassManager &LPM);
117
118    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
119      // We split critical edges, so we change the CFG.  However, we do update
120      // many analyses if they are around.
121      AU.addPreservedID(LoopSimplifyID);
122      AU.addPreserved<LoopInfo>();
123      AU.addPreserved<DominanceFrontier>();
124      AU.addPreserved<DominatorTree>();
125
126      AU.addRequiredID(LoopSimplifyID);
127      AU.addRequired<LoopInfo>();
128      AU.addRequired<DominatorTree>();
129      AU.addRequired<ScalarEvolution>();
130      AU.addPreserved<ScalarEvolution>();
131      AU.addRequired<IVUsers>();
132      AU.addPreserved<IVUsers>();
133    }
134
135  private:
136    ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
137                                  IVStrideUse* &CondUse,
138                                  const SCEV *const *  &CondStride);
139
140    void OptimizeIndvars(Loop *L);
141    void OptimizeLoopCountIV(Loop *L);
142    void OptimizeLoopTermCond(Loop *L);
143
144    /// OptimizeShadowIV - If IV is used in an int-to-float cast
145    /// inside the loop, then try to eliminate the cast operation.
146    void OptimizeShadowIV(Loop *L);
147
148    /// OptimizeMax - Rewrite the loop's terminating condition
149    /// if it uses a max computation.
150    ICmpInst *OptimizeMax(Loop *L, ICmpInst *Cond,
151                          IVStrideUse* &CondUse);
152
153    bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
154                           const SCEV *const * &CondStride);
155    bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
156    const SCEV *CheckForIVReuse(bool, bool, bool, const SCEV *const&,
157                             IVExpr&, const Type*,
158                             const std::vector<BasedUser>& UsersToProcess);
159    bool ValidScale(bool, int64_t,
160                    const std::vector<BasedUser>& UsersToProcess);
161    bool ValidOffset(bool, int64_t, int64_t,
162                     const std::vector<BasedUser>& UsersToProcess);
163    const SCEV *CollectIVUsers(const SCEV *const &Stride,
164                              IVUsersOfOneStride &Uses,
165                              Loop *L,
166                              bool &AllUsesAreAddresses,
167                              bool &AllUsesAreOutsideLoop,
168                              std::vector<BasedUser> &UsersToProcess);
169    bool ShouldUseFullStrengthReductionMode(
170                                const std::vector<BasedUser> &UsersToProcess,
171                                const Loop *L,
172                                bool AllUsesAreAddresses,
173                                const SCEV *Stride);
174    void PrepareToStrengthReduceFully(
175                             std::vector<BasedUser> &UsersToProcess,
176                             const SCEV *Stride,
177                             const SCEV *CommonExprs,
178                             const Loop *L,
179                             SCEVExpander &PreheaderRewriter);
180    void PrepareToStrengthReduceFromSmallerStride(
181                                         std::vector<BasedUser> &UsersToProcess,
182                                         Value *CommonBaseV,
183                                         const IVExpr &ReuseIV,
184                                         Instruction *PreInsertPt);
185    void PrepareToStrengthReduceWithNewPhi(
186                                  std::vector<BasedUser> &UsersToProcess,
187                                  const SCEV *Stride,
188                                  const SCEV *CommonExprs,
189                                  Value *CommonBaseV,
190                                  Instruction *IVIncInsertPt,
191                                  const Loop *L,
192                                  SCEVExpander &PreheaderRewriter);
193    void StrengthReduceStridedIVUsers(const SCEV *const &Stride,
194                                      IVUsersOfOneStride &Uses,
195                                      Loop *L);
196    void DeleteTriviallyDeadInstructions();
197  };
198}
199
200char LoopStrengthReduce::ID = 0;
201static RegisterPass<LoopStrengthReduce>
202X("loop-reduce", "Loop Strength Reduction");
203
204Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
205  return new LoopStrengthReduce(TLI);
206}
207
208/// DeleteTriviallyDeadInstructions - If any of the instructions in the
209/// specified set are trivially dead, delete them and see if this makes any of
210/// their operands subsequently dead.
211void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
212  if (DeadInsts.empty()) return;
213
214  while (!DeadInsts.empty()) {
215    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.back());
216    DeadInsts.pop_back();
217
218    if (I == 0 || !isInstructionTriviallyDead(I))
219      continue;
220
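    // Null out this instruction's operands first; any operand instruction
    // whose only remaining use was here becomes a new deletion candidate.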
221    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
222      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
223        *OI = 0;
224        if (U->use_empty())
225          DeadInsts.push_back(U);
226      }
227    }
228
229    I->eraseFromParent();
230    Changed = true;
231  }
232}
233
234/// containsAddRecFromDifferentLoop - Determine whether expression S involves a
235/// subexpression that is an AddRec from a loop other than L.  An outer loop
236/// of L is OK, but not an inner loop nor a disjoint loop.
237static bool containsAddRecFromDifferentLoop(const SCEV *S, Loop *L) {
238  // This is very common, put it first.
239  if (isa<SCEVConstant>(S))
240    return false;
241  if (const SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
242    for (unsigned int i=0; i< AE->getNumOperands(); i++)
243      if (containsAddRecFromDifferentLoop(AE->getOperand(i), L))
244        return true;
245    return false;
246  }
247  if (const SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
248    if (const Loop *newLoop = AE->getLoop()) {
249      if (newLoop == L)
250        return false;
251      // if newLoop is an outer loop of L, this is OK.
252      if (!LoopInfo::isNotAlreadyContainedIn(L, newLoop))
253        return false;
254    }
255    return true;
256  }
257  if (const SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
258    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
259           containsAddRecFromDifferentLoop(DE->getRHS(), L);
260#if 0
261  // SCEVSDivExpr has been backed out temporarily, but will be back; we'll
262  // need this when it is.
263  if (const SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
264    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
265           containsAddRecFromDifferentLoop(DE->getRHS(), L);
266#endif
267  if (const SCEVCastExpr *CE = dyn_cast<SCEVCastExpr>(S))
268    return containsAddRecFromDifferentLoop(CE->getOperand(), L);
269  return false;
270}
271
272/// isAddressUse - Returns true if the specified instruction is using the
273/// specified value as an address.
274static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
275  bool isAddress = isa<LoadInst>(Inst);
276  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
277    if (SI->getOperand(1) == OperandVal)
278      isAddress = true;
279  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
280    // Addressing modes can also be folded into prefetches and a variety
281    // of intrinsics.
282    switch (II->getIntrinsicID()) {
283      default: break;
284      case Intrinsic::prefetch:
285      case Intrinsic::x86_sse2_loadu_dq:
286      case Intrinsic::x86_sse2_loadu_pd:
287      case Intrinsic::x86_sse_loadu_ps:
288      case Intrinsic::x86_sse_storeu_ps:
289      case Intrinsic::x86_sse2_storeu_pd:
290      case Intrinsic::x86_sse2_storeu_dq:
291      case Intrinsic::x86_sse2_storel_dq:
292        if (II->getOperand(1) == OperandVal)
293          isAddress = true;
294        break;
295    }
296  }
297  return isAddress;
298}
299
300/// getAccessType - Return the type of the memory being accessed.
301static const Type *getAccessType(const Instruction *Inst) {
302  const Type *AccessTy = Inst->getType();
303  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
304    AccessTy = SI->getOperand(0)->getType();
305  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
306    // Addressing modes can also be folded into prefetches and a variety
307    // of intrinsics.
308    switch (II->getIntrinsicID()) {
309    default: break;
310    case Intrinsic::x86_sse_storeu_ps:
311    case Intrinsic::x86_sse2_storeu_pd:
312    case Intrinsic::x86_sse2_storeu_dq:
313    case Intrinsic::x86_sse2_storel_dq:
314      AccessTy = II->getOperand(1)->getType();
315      break;
316    }
317  }
318  return AccessTy;
319}
320
321namespace {
322  /// BasedUser - For a particular base value, keep information about how we've
323  /// partitioned the expression so far.
324  struct BasedUser {
325    /// SE - The current ScalarEvolution object.
326    ScalarEvolution *SE;
327
328    /// Base - The Base value for the PHI node that needs to be inserted for
329    /// this use.  As the use is processed, information gets moved from this
330    /// field to the Imm field (below).  BasedUser values are sorted by this
331    /// field.
332    const SCEV *Base;
333
334    /// Inst - The instruction using the induction variable.
335    Instruction *Inst;
336
337    /// OperandValToReplace - The operand value of Inst to replace with the
338    /// EmittedBase.
339    Value *OperandValToReplace;
340
341    /// Imm - The immediate value that should be added to the base immediately
342    /// before Inst, because it will be folded into the imm field of the
343    /// instruction.  This is also sometimes used for loop-variant values that
344    /// must be added inside the loop.
345    const SCEV *Imm;
346
347    /// Phi - The induction variable that performs the striding that
348    /// should be used for this user.
349    PHINode *Phi;
350
351    // isUseOfPostIncrementedValue - True if this should use the
352    // post-incremented version of this IV, not the preincremented version.
353    // This can only be set in special cases, such as the terminating setcc
354    // instruction for a loop and uses outside the loop that are dominated by
355    // the loop.
356    bool isUseOfPostIncrementedValue;
357
358    BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
359      : SE(se), Base(IVSU.getOffset()), Inst(IVSU.getUser()),
360        OperandValToReplace(IVSU.getOperandValToReplace()),
361        Imm(SE->getIntegerSCEV(0, Base->getType())),
362        isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue()) {}
363
364    // Once we rewrite the code to insert the new IVs we want, update the
365    // operands of Inst to use the new expression 'NewBase', with 'Imm' added
366    // to it.
367    void RewriteInstructionToUseNewBase(const SCEV *const &NewBase,
368                                        Instruction *InsertPt,
369                                       SCEVExpander &Rewriter, Loop *L, Pass *P,
370                                        LoopInfo &LI,
371                                        SmallVectorImpl<WeakVH> &DeadInsts);
372
373    Value *InsertCodeForBaseAtPosition(const SCEV *const &NewBase,
374                                       const Type *Ty,
375                                       SCEVExpander &Rewriter,
376                                       Instruction *IP, Loop *L,
377                                       LoopInfo &LI);
378    void dump() const;
379  };
380}
381
382void BasedUser::dump() const {
383  cerr << " Base=" << *Base;
384  cerr << " Imm=" << *Imm;
385  cerr << "   Inst: " << *Inst;
386}
387
388Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV *const &NewBase,
389                                              const Type *Ty,
390                                              SCEVExpander &Rewriter,
391                                              Instruction *IP, Loop *L,
392                                              LoopInfo &LI) {
393  // Figure out where we *really* want to insert this code.  In particular, if
394  // the user is inside of a loop that is nested inside of L, we really don't
395  // want to insert this expression before the user; we'd rather pull it out of
396  // as many loops as possible.
397  Instruction *BaseInsertPt = IP;
398
399  // Figure out the most-nested loop that IP is in.
400  Loop *InsertLoop = LI.getLoopFor(IP->getParent());
401
402  // If InsertLoop is not L, and InsertLoop is nested inside of L, figure out
403  // the preheader of the outer-most loop where NewBase is not loop invariant.
404  if (L->contains(IP->getParent()))
405    while (InsertLoop && NewBase->isLoopInvariant(InsertLoop)) {
406      BaseInsertPt = InsertLoop->getLoopPreheader()->getTerminator();
407      InsertLoop = InsertLoop->getParentLoop();
408    }
409
410  Value *Base = Rewriter.expandCodeFor(NewBase, 0, BaseInsertPt);
411
412  const SCEV *NewValSCEV = SE->getUnknown(Base);
413
414  // Always emit the immediate into the same block as the user.
415  NewValSCEV = SE->getAddExpr(NewValSCEV, Imm);
416
417  return Rewriter.expandCodeFor(NewValSCEV, Ty, IP);
418}
419
420
421// Once we rewrite the code to insert the new IVs we want, update the
422// operands of Inst to use the new expression 'NewBase', with 'Imm' added
423// to it. NewBasePt is the last instruction which contributes to the
424// value of NewBase in the case that it's a different instruction from
425// the PHI that NewBase is computed from, or null otherwise.
426//
427void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase,
428                                               Instruction *NewBasePt,
429                                      SCEVExpander &Rewriter, Loop *L, Pass *P,
430                                      LoopInfo &LI,
431                                      SmallVectorImpl<WeakVH> &DeadInsts) {
432  if (!isa<PHINode>(Inst)) {
433    // By default, insert code at the user instruction.
434    BasicBlock::iterator InsertPt = Inst;
435
436    // However, if the Operand is itself an instruction, the (potentially
437    // complex) inserted code may be shared by many users.  Because of this, we
438    // want to emit code for the computation of the operand right before its old
439    // computation.  This is usually safe, because we obviously used to use the
440    // computation when it was computed in its current block.  However, in some
441    // cases (e.g. use of a post-incremented induction variable) the NewBase
442    // value will be pinned to live somewhere after the original computation.
443    // In this case, we have to back off.
444    //
445    // If this is a use outside the loop (which means after, since it is based
446    // on a loop indvar) we use the post-incremented value, so that we don't
447    // artificially make the preinc value live out the bottom of the loop.
448    if (!isUseOfPostIncrementedValue && L->contains(Inst->getParent())) {
449      if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
450        InsertPt = NewBasePt;
451        ++InsertPt;
452      } else if (Instruction *OpInst
453                 = dyn_cast<Instruction>(OperandValToReplace)) {
454        InsertPt = OpInst;
455        while (isa<PHINode>(InsertPt)) ++InsertPt;
456      }
457    }
458    Value *NewVal = InsertCodeForBaseAtPosition(NewBase,
459                                                OperandValToReplace->getType(),
460                                                Rewriter, InsertPt, L, LI);
461    // Replace the use of the operand Value with the new Phi we just created.
462    Inst->replaceUsesOfWith(OperandValToReplace, NewVal);
463
464    DOUT << "      Replacing with ";
465    DEBUG(WriteAsOperand(*DOUT, NewVal, /*PrintType=*/false));
466    DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
467    return;
468  }
469
470  // PHI nodes are more complex.  We have to insert one copy of the NewBase+Imm
471  // expression into each operand block that uses it.  Note that PHI nodes can
472  // have multiple entries for the same predecessor.  We use a map to make sure
473  // that a PHI node only has a single Value* for each predecessor (which also
474  // prevents us from inserting duplicate code in some blocks).
475  DenseMap<BasicBlock*, Value*> InsertedCode;
476  PHINode *PN = cast<PHINode>(Inst);
477  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
478    if (PN->getIncomingValue(i) == OperandValToReplace) {
479      // If the original expression is outside the loop, put the replacement
480      // code in the same place as the original expression,
481      // which need not be an immediate predecessor of this PHI.  This way we
482      // need only one copy of it even if it is referenced multiple times in
483      // the PHI.  We don't do this when the original expression is inside the
484      // loop because multiple copies sometimes do useful sinking of code in
485      // that case(?).
486      Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
487      if (L->contains(OldLoc->getParent())) {
488        // If this is a critical edge, split the edge so that we do not insert
489        // the code on all predecessor/successor paths.  We do this unless this
490        // is the canonical backedge for this loop, as this can make some
491        // inserted code be in an illegal position.
492        BasicBlock *PHIPred = PN->getIncomingBlock(i);
493        if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
494            (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {
495
496          // First step, split the critical edge.
497          SplitCriticalEdge(PHIPred, PN->getParent(), P, false);
498
499          // Next step: move the basic block.  In particular, if the PHI node
500          // is outside of the loop, and PredTI is in the loop, we want to
501          // move the block to be immediately before the PHI block, not
502          // immediately after PredTI.
503          if (L->contains(PHIPred) && !L->contains(PN->getParent())) {
504            BasicBlock *NewBB = PN->getIncomingBlock(i);
505            NewBB->moveBefore(PN->getParent());
506          }
507
508          // Splitting the edge can reduce the number of PHI entries we have.
509          e = PN->getNumIncomingValues();
510        }
511      }
512      Value *&Code = InsertedCode[PN->getIncomingBlock(i)];
513      if (!Code) {
514        // Insert the code into the end of the predecessor block.
515        Instruction *InsertPt = (L->contains(OldLoc->getParent())) ?
516                                PN->getIncomingBlock(i)->getTerminator() :
517                                OldLoc->getParent()->getTerminator();
518        Code = InsertCodeForBaseAtPosition(NewBase, PN->getType(),
519                                           Rewriter, InsertPt, L, LI);
520
521        DOUT << "      Changing PHI use to ";
522        DEBUG(WriteAsOperand(*DOUT, Code, /*PrintType=*/false));
523        DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
524      }
525
526      // Replace the use of the operand Value with the new Phi we just created.
527      PN->setIncomingValue(i, Code);
528      Rewriter.clear();
529    }
530  }
531
532  // PHI node might have become a constant value after SplitCriticalEdge.
533  DeadInsts.push_back(Inst);
534}
535
536
537/// fitsInAddressMode - Return true if V can be subsumed within an addressing
538/// mode, and does not need to be put in a register first.
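/// For example, on a target whose loads accept a [reg + imm] form, a small
/// constant offset typically fits in the displacement field while a
/// loop-computed value does not.  (A rough illustration; the real answer
/// comes from TargetLowering::isLegalAddressingMode below.)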
539static bool fitsInAddressMode(const SCEV *const &V, const Type *AccessTy,
540                             const TargetLowering *TLI, bool HasBaseReg) {
541  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
542    int64_t VC = SC->getValue()->getSExtValue();
543    if (TLI) {
544      TargetLowering::AddrMode AM;
545      AM.BaseOffs = VC;
546      AM.HasBaseReg = HasBaseReg;
547      return TLI->isLegalAddressingMode(AM, AccessTy);
548    } else {
549      // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
550      return (VC > -(1 << 16) && VC < (1 << 16)-1);
551    }
552  }
553
554  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
555    if (GlobalValue *GV = dyn_cast<GlobalValue>(SU->getValue())) {
556      if (TLI) {
557        TargetLowering::AddrMode AM;
558        AM.BaseGV = GV;
559        AM.HasBaseReg = HasBaseReg;
560        return TLI->isLegalAddressingMode(AM, AccessTy);
561      } else {
562        // Default: assume global addresses are not legal.
563      }
564    }
565
566  return false;
567}
568
569/// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
570/// loop varying to the Imm operand.
571static void MoveLoopVariantsToImmediateField(const SCEV *&Val, const SCEV *&Imm,
572                                             Loop *L, ScalarEvolution *SE) {
573  if (Val->isLoopInvariant(L)) return;  // Nothing to do.
574
575  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
576    SmallVector<const SCEV *, 4> NewOps;
577    NewOps.reserve(SAE->getNumOperands());
578
579    for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
580      if (!SAE->getOperand(i)->isLoopInvariant(L)) {
581        // If this is a loop-variant expression, it must stay in the immediate
582        // field of the expression.
583        Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
584      } else {
585        NewOps.push_back(SAE->getOperand(i));
586      }
587
588    if (NewOps.empty())
589      Val = SE->getIntegerSCEV(0, Val->getType());
590    else
591      Val = SE->getAddExpr(NewOps);
592  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
593    // Try to pull immediates out of the start value of nested addrec's.
594    const SCEV *Start = SARE->getStart();
595    MoveLoopVariantsToImmediateField(Start, Imm, L, SE);
596
597    SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
598    Ops[0] = Start;
599    Val = SE->getAddRecExpr(Ops, SARE->getLoop());
600  } else {
601    // Otherwise, all of Val is variant, move the whole thing over.
602    Imm = SE->getAddExpr(Imm, Val);
603    Val = SE->getIntegerSCEV(0, Val->getType());
604  }
605}
606
607
608/// MoveImmediateValues - Look at Val, and pull out any additions of constants
609/// that can fit into the immediate field of instructions in the target.
610/// Accumulate these immediate values into the Imm value.
611static void MoveImmediateValues(const TargetLowering *TLI,
612                                const Type *AccessTy,
613                                const SCEV *&Val, const SCEV *&Imm,
614                                bool isAddress, Loop *L,
615                                ScalarEvolution *SE) {
616  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
617    SmallVector<const SCEV *, 4> NewOps;
618    NewOps.reserve(SAE->getNumOperands());
619
620    for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
621      const SCEV *NewOp = SAE->getOperand(i);
622      MoveImmediateValues(TLI, AccessTy, NewOp, Imm, isAddress, L, SE);
623
624      if (!NewOp->isLoopInvariant(L)) {
625        // If this is a loop-variant expression, it must stay in the immediate
626        // field of the expression.
627        Imm = SE->getAddExpr(Imm, NewOp);
628      } else {
629        NewOps.push_back(NewOp);
630      }
631    }
632
633    if (NewOps.empty())
634      Val = SE->getIntegerSCEV(0, Val->getType());
635    else
636      Val = SE->getAddExpr(NewOps);
637    return;
638  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
639    // Try to pull immediates out of the start value of nested addrec's.
640    const SCEV *Start = SARE->getStart();
641    MoveImmediateValues(TLI, AccessTy, Start, Imm, isAddress, L, SE);
642
643    if (Start != SARE->getStart()) {
644      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
645      Ops[0] = Start;
646      Val = SE->getAddRecExpr(Ops, SARE->getLoop());
647    }
648    return;
649  } else if (const SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
650    // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
651    if (isAddress &&
652        fitsInAddressMode(SME->getOperand(0), AccessTy, TLI, false) &&
653        SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {
654
655      const SCEV *SubImm = SE->getIntegerSCEV(0, Val->getType());
656      const SCEV *NewOp = SME->getOperand(1);
657      MoveImmediateValues(TLI, AccessTy, NewOp, SubImm, isAddress, L, SE);
658
659      // If we extracted something out of the subexpressions, see if we can
660      // simplify this!
661      if (NewOp != SME->getOperand(1)) {
662        // Scale SubImm up by "8".  If the result is a target constant, we are
663        // good.
664        SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
665        if (fitsInAddressMode(SubImm, AccessTy, TLI, false)) {
666          // Accumulate the immediate.
667          Imm = SE->getAddExpr(Imm, SubImm);
668
669          // Update what is left of 'Val'.
670          Val = SE->getMulExpr(SME->getOperand(0), NewOp);
671          return;
672        }
673      }
674    }
675  }
676
677  // Loop-variant expressions must stay in the immediate field of the
678  // expression.
679  if ((isAddress && fitsInAddressMode(Val, AccessTy, TLI, false)) ||
680      !Val->isLoopInvariant(L)) {
681    Imm = SE->getAddExpr(Imm, Val);
682    Val = SE->getIntegerSCEV(0, Val->getType());
683    return;
684  }
685
686  // Otherwise, no immediates to move.
687}
688
689static void MoveImmediateValues(const TargetLowering *TLI,
690                                Instruction *User,
691                                const SCEV *&Val, const SCEV *&Imm,
692                                bool isAddress, Loop *L,
693                                ScalarEvolution *SE) {
694  const Type *AccessTy = getAccessType(User);
695  MoveImmediateValues(TLI, AccessTy, Val, Imm, isAddress, L, SE);
696}
697
698/// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
699/// added together.  This is used to reassociate common addition subexprs
700/// together for maximal sharing when rewriting bases.
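/// For example, an addrec whose start is A+B, written {A+B,+,S}, is split
/// into the pieces {0,+,S}, A and B, so that a piece such as A that also
/// appears in another use's base can later be recognized as common.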
701static void SeparateSubExprs(SmallVector<const SCEV *, 16> &SubExprs,
702                             const SCEV *Expr,
703                             ScalarEvolution *SE) {
704  if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
705    for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
706      SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
707  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
708    const SCEV *Zero = SE->getIntegerSCEV(0, Expr->getType());
709    if (SARE->getOperand(0) == Zero) {
710      SubExprs.push_back(Expr);
711    } else {
712      // Compute the addrec with zero as its base.
713      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
714      Ops[0] = Zero;   // Start with zero base.
715      SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));
716
717
718      SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
719    }
720  } else if (!Expr->isZero()) {
721    // Do not add zero.
722    SubExprs.push_back(Expr);
723  }
724}
725
726// This is logically local to the following function, but C++ says we have
727// to make it file scope.
728struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
729
730/// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
731/// the Uses, removing any common subexpressions, except that if all such
732/// subexpressions can be folded into an addressing mode for all uses inside
733/// the loop (this case is referred to as "free" in comments herein) we do
734/// not remove anything.  This looks for things like (a+b+c) and
735/// (a+c+d) and computes the common (a+c) subexpression.  The common expression
736/// is *removed* from the Bases and returned.
737static const SCEV *
738RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
739                                    ScalarEvolution *SE, Loop *L,
740                                    const TargetLowering *TLI) {
741  unsigned NumUses = Uses.size();
742
743  // Only one use?  This is a very common case, so we handle it specially and
744  // cheaply.
745  const SCEV *Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
746  const SCEV *Result = Zero;
747  const SCEV *FreeResult = Zero;
748  if (NumUses == 1) {
749    // If the use is inside the loop, use its base, regardless of what it is:
750    // it is clearly shared across all the IV's.  If the use is outside the loop
751    // (which means after it) we don't want to factor anything *into* the loop,
752    // so just use 0 as the base.
753    if (L->contains(Uses[0].Inst->getParent()))
754      std::swap(Result, Uses[0].Base);
755    return Result;
756  }
757
758  // To find common subexpressions, count how many of Uses use each expression.
759  // If any subexpressions are used Uses.size() times, they are common.
760  // Also track whether all uses of each expression can be moved into an
761  // addressing mode "for free"; such expressions are left within the loop.
762  // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
763  std::map<const SCEV *, SubExprUseData> SubExpressionUseData;
764
765  // UniqueSubExprs - Keep track of all of the subexpressions we see in the
766  // order we see them.
767  SmallVector<const SCEV *, 16> UniqueSubExprs;
768
769  SmallVector<const SCEV *, 16> SubExprs;
770  unsigned NumUsesInsideLoop = 0;
771  for (unsigned i = 0; i != NumUses; ++i) {
772    // If the user is outside the loop, just ignore it for base computation.
773    // Since the user is outside the loop, it must be *after* the loop (if it
774    // were before, it could not be based on the loop IV).  We don't want users
775    // after the loop to affect base computation of values *inside* the loop,
776    // because we can always add their offsets to the result IV after the loop
777    // is done, ensuring we get good code inside the loop.
778    if (!L->contains(Uses[i].Inst->getParent()))
779      continue;
780    NumUsesInsideLoop++;
781
782    // If the base is zero (which is common), return zero now, there are no
783    // CSEs we can find.
784    if (Uses[i].Base == Zero) return Zero;
785
786    // If this use is as an address we may be able to put CSEs in the addressing
787    // mode rather than hoisting them.
788    bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
789    // We may need the AccessTy below, but only when isAddrUse, so compute it
790    // only in that case.
791    const Type *AccessTy = 0;
792    if (isAddrUse)
793      AccessTy = getAccessType(Uses[i].Inst);
794
795    // Split the expression into subexprs.
796    SeparateSubExprs(SubExprs, Uses[i].Base, SE);
797    // Add one to SubExpressionUseData.Count for each subexpr present, and
798    // if the subexpr is not a valid immediate within an addressing mode use,
799    // set SubExpressionUseData.notAllUsesAreFree.  We definitely want to
800    // hoist these out of the loop (if they are common to all uses).
801    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
802      if (++SubExpressionUseData[SubExprs[j]].Count == 1)
803        UniqueSubExprs.push_back(SubExprs[j]);
804      if (!isAddrUse || !fitsInAddressMode(SubExprs[j], AccessTy, TLI, false))
805        SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
806    }
807    SubExprs.clear();
808  }
809
810  // Now that we know how many times each is used, build Result.  Iterate over
811  // UniqueSubExprs so that we have a stable ordering.
812  for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
813    std::map<const SCEV *, SubExprUseData>::iterator I =
814       SubExpressionUseData.find(UniqueSubExprs[i]);
815    assert(I != SubExpressionUseData.end() && "Entry not found?");
816    if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
817      if (I->second.notAllUsesAreFree)
818        Result = SE->getAddExpr(Result, I->first);
819      else
820        FreeResult = SE->getAddExpr(FreeResult, I->first);
821    } else
822      // Remove non-cse's from SubExpressionUseData.
823      SubExpressionUseData.erase(I);
824  }
825
826  if (FreeResult != Zero) {
827    // We have some subexpressions that can be subsumed into addressing
828    // modes in every use inside the loop.  However, it's possible that
829    // there are so many of them that the combined FreeResult cannot
830    // be subsumed, or that the target cannot handle both a FreeResult
831    // and a Result in the same instruction (for example because it would
832    // require too many registers).  Check this.
833    for (unsigned i=0; i<NumUses; ++i) {
834      if (!L->contains(Uses[i].Inst->getParent()))
835        continue;
836      // We know this is an addressing mode use; if there are any uses that
837      // are not, FreeResult would be Zero.
838      const Type *AccessTy = getAccessType(Uses[i].Inst);
839      if (!fitsInAddressMode(FreeResult, AccessTy, TLI, Result!=Zero)) {
840        // FIXME:  could split up FreeResult into pieces here, some hoisted
841        // and some not.  There is no obvious advantage to this.
842        Result = SE->getAddExpr(Result, FreeResult);
843        FreeResult = Zero;
844        break;
845      }
846    }
847  }
848
849  // If we found no CSE's, return now.
850  if (Result == Zero) return Result;
851
852  // If we still have a FreeResult, remove its subexpressions from
853  // SubExpressionUseData.  This means they will remain in the use Bases.
854  if (FreeResult != Zero) {
855    SeparateSubExprs(SubExprs, FreeResult, SE);
856    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
857      std::map<const SCEV *, SubExprUseData>::iterator I =
858         SubExpressionUseData.find(SubExprs[j]);
859      SubExpressionUseData.erase(I);
860    }
861    SubExprs.clear();
862  }
863
864  // Otherwise, remove all of the CSE's we found from each of the base values.
865  for (unsigned i = 0; i != NumUses; ++i) {
866    // Uses outside the loop don't necessarily include the common base, but
867    // the final IV value coming into those uses does.  Instead of trying to
868    // remove the pieces of the common base, which might not be there,
869    // subtract off the base to compensate for this.
870    if (!L->contains(Uses[i].Inst->getParent())) {
871      Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
872      continue;
873    }
874
875    // Split the expression into subexprs.
876    SeparateSubExprs(SubExprs, Uses[i].Base, SE);
877
878    // Remove any common subexpressions.
879    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
880      if (SubExpressionUseData.count(SubExprs[j])) {
881        SubExprs.erase(SubExprs.begin()+j);
882        --j; --e;
883      }
884
885    // Finally, add the non-shared expressions together.
886    if (SubExprs.empty())
887      Uses[i].Base = Zero;
888    else
889      Uses[i].Base = SE->getAddExpr(SubExprs);
890    SubExprs.clear();
891  }
892
893  return Result;
894}
895
896/// ValidScale - Check whether the given Scale is valid for all loads and
897/// stores in UsersToProcess.
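/// A Scale is valid only if an addressing mode of the form
/// [imm + base + iv*Scale] is legal for every access, as reported by
/// TargetLowering::isLegalAddressingMode (when a TargetLowering is available).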
898///
899bool LoopStrengthReduce::ValidScale(bool HasBaseReg, int64_t Scale,
900                               const std::vector<BasedUser>& UsersToProcess) {
901  if (!TLI)
902    return true;
903
904  for (unsigned i = 0, e = UsersToProcess.size(); i!=e; ++i) {
905    // If this is a load or other access, pass the type of the access in.
906    const Type *AccessTy = Type::VoidTy;
907    if (isAddressUse(UsersToProcess[i].Inst,
908                     UsersToProcess[i].OperandValToReplace))
909      AccessTy = getAccessType(UsersToProcess[i].Inst);
910    else if (isa<PHINode>(UsersToProcess[i].Inst))
911      continue;
912
913    TargetLowering::AddrMode AM;
914    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
915      AM.BaseOffs = SC->getValue()->getSExtValue();
916    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
917    AM.Scale = Scale;
918
919    // If load[imm+r*scale] is illegal, bail out.
920    if (!TLI->isLegalAddressingMode(AM, AccessTy))
921      return false;
922  }
923  return true;
924}
925
926/// ValidOffset - Check whether the given Offset is valid for all loads and
927/// stores in UsersToProcess.
928///
929bool LoopStrengthReduce::ValidOffset(bool HasBaseReg,
930                               int64_t Offset,
931                               int64_t Scale,
932                               const std::vector<BasedUser>& UsersToProcess) {
933  if (!TLI)
934    return true;
935
936  for (unsigned i=0, e = UsersToProcess.size(); i!=e; ++i) {
937    // If this is a load or other access, pass the type of the access in.
938    const Type *AccessTy = Type::VoidTy;
939    if (isAddressUse(UsersToProcess[i].Inst,
940                     UsersToProcess[i].OperandValToReplace))
941      AccessTy = getAccessType(UsersToProcess[i].Inst);
942    else if (isa<PHINode>(UsersToProcess[i].Inst))
943      continue;
944
945    TargetLowering::AddrMode AM;
946    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
947      AM.BaseOffs = SC->getValue()->getSExtValue();
948    AM.BaseOffs = (uint64_t)AM.BaseOffs + (uint64_t)Offset;
949    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
950    AM.Scale = Scale;
951
952    // If load[imm+r*scale] is illegal, bail out.
953    if (!TLI->isLegalAddressingMode(AM, AccessTy))
954      return false;
955  }
956  return true;
957}
958
959/// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
960/// a nop.
961bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
962                                                const Type *Ty2) {
963  if (Ty1 == Ty2)
964    return false;
965  Ty1 = SE->getEffectiveSCEVType(Ty1);
966  Ty2 = SE->getEffectiveSCEVType(Ty2);
967  if (Ty1 == Ty2)
968    return false;
969  if (Ty1->canLosslesslyBitCastTo(Ty2))
970    return false;
971  if (TLI && TLI->isTruncateFree(Ty1, Ty2))
972    return false;
973  return true;
974}
975
976/// CheckForIVReuse - Returns the multiple if the stride is a multiple
977/// of a previous stride and it is a legal value for the target addressing
978/// mode scale component and optional base reg. This allows the users of
979/// this stride to be rewritten as prev iv * factor. It returns 0 if no
980/// reuse is possible.  Factors can be negative on some targets, e.g. ARM.
981///
982/// If all uses are outside the loop, we don't require that all multiplies
983/// be folded into the addressing mode, nor even that the factor be constant;
984/// a multiply (executed once) outside the loop is better than another IV
985/// within.  Well, usually.
986const SCEV *LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
987                                bool AllUsesAreAddresses,
988                                bool AllUsesAreOutsideLoop,
989                                const SCEV *const &Stride,
990                                IVExpr &IV, const Type *Ty,
991                                const std::vector<BasedUser>& UsersToProcess) {
992  if (StrideNoReuse.count(Stride))
993    return SE->getIntegerSCEV(0, Stride->getType());
994
995  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
996    int64_t SInt = SC->getValue()->getSExtValue();
997    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
998         NewStride != e; ++NewStride) {
999      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
1000                IVsByStride.find(IU->StrideOrder[NewStride]);
1001      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first) ||
1002          StrideNoReuse.count(SI->first))
1003        continue;
1004      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
1005      if (SI->first != Stride &&
1006          (unsigned(abs64(SInt)) < SSInt || (SInt % SSInt) != 0))
1007        continue;
1008      int64_t Scale = SInt / SSInt;
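      // e.g. a stride-12 use can reuse an existing stride-4 IV with Scale == 3,
      // letting the use be rewritten in terms of "iv * 3" instead of a new IV.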
1009      // Check that this stride is valid for all the types used for loads and
1010      // stores; if it can be used for some and not others, we might as well use
1011      // the original stride everywhere, since we have to create the IV for it
1012      // anyway. If the scale is 1, then we don't need to worry about folding
1013      // multiplications.
1014      if (Scale == 1 ||
1015          (AllUsesAreAddresses &&
1016           ValidScale(HasBaseReg, Scale, UsersToProcess))) {
1017        // Prefer to reuse an IV with a base of zero.
1018        for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
1019               IE = SI->second.IVs.end(); II != IE; ++II)
1020          // Only reuse previous IV if it would not require a type conversion
1021          // and if the base difference can be folded.
1022          if (II->Base->isZero() &&
1023              !RequiresTypeConversion(II->Base->getType(), Ty)) {
1024            IV = *II;
1025            return SE->getIntegerSCEV(Scale, Stride->getType());
1026          }
1027        // Otherwise, settle for an IV with a foldable base.
1028        if (AllUsesAreAddresses)
1029          for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
1030                 IE = SI->second.IVs.end(); II != IE; ++II)
1031            // Only reuse previous IV if it would not require a type conversion
1032            // and if the base difference can be folded.
1033            if (SE->getEffectiveSCEVType(II->Base->getType()) ==
1034                SE->getEffectiveSCEVType(Ty) &&
1035                isa<SCEVConstant>(II->Base)) {
1036              int64_t Base =
1037                cast<SCEVConstant>(II->Base)->getValue()->getSExtValue();
1038              if (Base > INT32_MIN && Base <= INT32_MAX &&
1039                  ValidOffset(HasBaseReg, -Base * Scale,
1040                              Scale, UsersToProcess)) {
1041                IV = *II;
1042                return SE->getIntegerSCEV(Scale, Stride->getType());
1043              }
1044            }
1045      }
1046    }
1047  } else if (AllUsesAreOutsideLoop) {
1048    // Accept nonconstant strides here; it is really really right to substitute
1049    // an existing IV if we can.
1050    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
1051         NewStride != e; ++NewStride) {
1052      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
1053                IVsByStride.find(IU->StrideOrder[NewStride]);
1054      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
1055        continue;
1056      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
1057      if (SI->first != Stride && SSInt != 1)
1058        continue;
1059      for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
1060             IE = SI->second.IVs.end(); II != IE; ++II)
1061        // Accept nonzero base here.
1062        // Only reuse previous IV if it would not require a type conversion.
1063        if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
1064          IV = *II;
1065          return Stride;
1066        }
1067    }
1068    // Special case, old IV is -1*x and this one is x.  Can treat this one as
1069    // -1*old.
1070    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
1071         NewStride != e; ++NewStride) {
1072      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
1073                IVsByStride.find(IU->StrideOrder[NewStride]);
1074      if (SI == IVsByStride.end())
1075        continue;
1076      if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
1077        if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
1078          if (Stride == ME->getOperand(1) &&
1079              SC->getValue()->getSExtValue() == -1LL)
1080            for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
1081                   IE = SI->second.IVs.end(); II != IE; ++II)
1082              // Accept nonzero base here.
1083              // Only reuse previous IV if it would not require type conversion.
1084              if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
1085                IV = *II;
1086                return SE->getIntegerSCEV(-1LL, Stride->getType());
1087              }
1088    }
1089  }
1090  return SE->getIntegerSCEV(0, Stride->getType());
1091}
1092
1093/// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
1094/// returns true if Val's isUseOfPostIncrementedValue is true.
1095static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
1096  return Val.isUseOfPostIncrementedValue;
1097}
1098
1099/// isNonConstantNegative - Return true if the specified scev is negated, but
1100/// not a constant.
1101static bool isNonConstantNegative(const SCEV *const &Expr) {
1102  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
1103  if (!Mul) return false;
1104
1105  // If there is a constant factor, it will be first.
1106  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
1107  if (!SC) return false;
1108
1109  // Return true if the value is negative, this matches things like (-42 * V).
1110  return SC->getValue()->getValue().isNegative();
1111}
1112
1113/// CollectIVUsers - Transform our list of users and offsets into a bit more
1114/// complex table. In this new vector, each 'BasedUser' contains 'Base', the base
1115/// of the strided accesses, as well as the old information from Uses. We
1116/// progressively move information from the Base field to the Imm field, until
1117/// we eventually have the full access expression to rewrite the use.
1118const SCEV *LoopStrengthReduce::CollectIVUsers(const SCEV *const &Stride,
1119                                              IVUsersOfOneStride &Uses,
1120                                              Loop *L,
1121                                              bool &AllUsesAreAddresses,
1122                                              bool &AllUsesAreOutsideLoop,
1123                                       std::vector<BasedUser> &UsersToProcess) {
1124  // FIXME: Generalize to non-affine IV's.
1125  if (!Stride->isLoopInvariant(L))
1126    return SE->getIntegerSCEV(0, Stride->getType());
1127
1128  UsersToProcess.reserve(Uses.Users.size());
1129  for (ilist<IVStrideUse>::iterator I = Uses.Users.begin(),
1130       E = Uses.Users.end(); I != E; ++I) {
1131    UsersToProcess.push_back(BasedUser(*I, SE));
1132
1133    // Move any loop variant operands from the offset field to the immediate
1134    // field of the use, so that we don't try to use something before it is
1135    // computed.
1136    MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
1137                                     UsersToProcess.back().Imm, L, SE);
1138    assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
1139           "Base value is not loop invariant!");
1140  }
1141
1142  // We now have a whole bunch of uses of like-strided induction variables, but
1143  // they might all have different bases.  We want to emit one PHI node for this
1144  // stride, into which we fold as many common expressions (between the IVs) as
1145  // possible.  Start by identifying the common expressions in the base values
1146  // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
1147  // "A+B"), emit it to the preheader, then remove the expression from the
1148  // UsersToProcess base values.
1149  const SCEV *CommonExprs =
1150    RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);
1151
1152  // Next, figure out what we can represent in the immediate fields of
1153  // instructions.  If we can represent anything there, move it to the imm
1154  // fields of the BasedUsers.  We do this so that it increases the commonality
1155  // of the remaining uses.
1156  unsigned NumPHI = 0;
1157  bool HasAddress = false;
1158  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1159    // If the user is not in the current loop, this means it is using the exit
1160    // value of the IV.  Do not put anything in the base; make sure it's all in
1161    // the immediate field to allow as much factoring as possible.
1162    if (!L->contains(UsersToProcess[i].Inst->getParent())) {
1163      UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
1164                                             UsersToProcess[i].Base);
1165      UsersToProcess[i].Base =
1166        SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
1167    } else {
1168      // Not all uses are outside the loop.
1169      AllUsesAreOutsideLoop = false;
1170
1171      // Addressing modes can be folded into loads and stores.  Be careful,
1172      // though, that the store is through the expression, not of the expression.
1173      bool isPHI = false;
1174      bool isAddress = isAddressUse(UsersToProcess[i].Inst,
1175                                    UsersToProcess[i].OperandValToReplace);
1176      if (isa<PHINode>(UsersToProcess[i].Inst)) {
1177        isPHI = true;
1178        ++NumPHI;
1179      }
1180
1181      if (isAddress)
1182        HasAddress = true;
1183
1184      // If this use isn't an address, then not all uses are addresses.
1185      if (!isAddress && !isPHI)
1186        AllUsesAreAddresses = false;
1187
1188      MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
1189                          UsersToProcess[i].Imm, isAddress, L, SE);
1190    }
1191  }
1192
1193  // If one of the uses is a PHI node and all other uses are addresses, still
1194  // allow IV reuse. Essentially we are trading one constant multiplication
1195  // for one fewer IV.
1196  if (NumPHI > 1)
1197    AllUsesAreAddresses = false;
1198
1199  // There are no in-loop address uses.
1200  if (AllUsesAreAddresses && (!HasAddress && !AllUsesAreOutsideLoop))
1201    AllUsesAreAddresses = false;
1202
1203  return CommonExprs;
1204}
1205
1206/// ShouldUseFullStrengthReductionMode - Test whether full strength-reduction
1207/// is valid and profitable for the given set of users of a stride. In
1208/// full strength-reduction mode, all addresses at the current stride are
1209/// strength-reduced all the way down to pointer arithmetic.
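/// For example, if the addresses p+i, p+i+4 and p+i+8 are all used inside the
/// loop, full mode gives each one its own strength-reduced pointer rather than
/// one shared IV plus constant offsets folded into the addressing modes.
/// (A sketch only; the heuristics below decide when this is profitable.)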
1210///
1211bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
1212                                   const std::vector<BasedUser> &UsersToProcess,
1213                                   const Loop *L,
1214                                   bool AllUsesAreAddresses,
1215                                   const SCEV *Stride) {
1216  if (!EnableFullLSRMode)
1217    return false;
1218
1219  // The heuristics below aim to avoid increasing register pressure, but
1220  // fully strength-reducing all the addresses increases the number of
1221  // add instructions, so don't do this when optimizing for size.
1222  // TODO: If the loop is large, the savings due to simpler addresses
1223  // may outweigh the costs of the extra increment instructions.
1224  if (L->getHeader()->getParent()->hasFnAttr(Attribute::OptimizeForSize))
1225    return false;
1226
1227  // TODO: For now, don't do full strength reduction if there could
1228  // potentially be greater-stride multiples of the current stride
1229  // which could reuse the current stride IV.
1230  if (IU->StrideOrder.back() != Stride)
1231    return false;
1232
1233  // Iterate through the uses to find conditions that automatically rule out
1234  // full-lsr mode.
1235  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
1236    const SCEV *Base = UsersToProcess[i].Base;
1237    const SCEV *Imm = UsersToProcess[i].Imm;
1238    // If any users have a loop-variant component, they can't be fully
1239    // strength-reduced.
1240    if (Imm && !Imm->isLoopInvariant(L))
1241      return false;
1242    // If there are two users with the same base and the difference between
1243    // the two Imm values can't be folded into the address, full
1244    // strength reduction would increase register pressure.
1245    do {
1246      const SCEV *CurImm = UsersToProcess[i].Imm;
1247      if ((CurImm || Imm) && CurImm != Imm) {
1248        if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
1249        if (!Imm)       Imm = SE->getIntegerSCEV(0, Stride->getType());
1250        const Instruction *Inst = UsersToProcess[i].Inst;
1251        const Type *AccessTy = getAccessType(Inst);
1252        const SCEV *Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
1253        if (!Diff->isZero() &&
1254            (!AllUsesAreAddresses ||
1255             !fitsInAddressMode(Diff, AccessTy, TLI, /*HasBaseReg=*/true)))
1256          return false;
1257      }
1258    } while (++i != e && Base == UsersToProcess[i].Base);
1259  }
1260
1261  // If there's exactly one user in this stride, fully strength-reducing it
1262  // won't increase register pressure. If it's starting from a non-zero base,
1263  // it'll be simpler this way.
1264  if (UsersToProcess.size() == 1 && !UsersToProcess[0].Base->isZero())
1265    return true;
1266
1267  // Otherwise, if there are any users in this stride that don't require
1268  // a register for their base, full strength-reduction will increase
1269  // register pressure.
1270  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1271    if (UsersToProcess[i].Base->isZero())
1272      return false;
1273
1274  // Otherwise, go for it.
1275  return true;
1276}
1277
1278/// InsertAffinePhi - Create and insert a PHI node for an induction variable
1279/// with the specified start and step values in the specified loop.
1280///
1281/// If the stride is negative, the increment is emitted as a subtract of the
1282/// negated stride instead of an add.
1283///
1284/// Return the created phi node.
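/// Conceptually, for a loop-invariant start S and step D the result is the
/// following IR-level sketch (value names match the ones created below; the
/// placement of the increment is chosen by the caller via IVIncInsertPt):
///
///   header:
///     %lsr.iv = phi [ S, %preheader ], [ %lsr.iv.next, %latch ]
///     ...
///     %lsr.iv.next = add %lsr.iv, D    ; a 'sub' of -D when D is negative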
1285///
1286static PHINode *InsertAffinePhi(const SCEV *Start, const SCEV *Step,
1287                                Instruction *IVIncInsertPt,
1288                                const Loop *L,
1289                                SCEVExpander &Rewriter) {
1290  assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
1291  assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");
1292
1293  BasicBlock *Header = L->getHeader();
1294  BasicBlock *Preheader = L->getLoopPreheader();
1295  BasicBlock *LatchBlock = L->getLoopLatch();
1296  const Type *Ty = Start->getType();
1297  Ty = Rewriter.SE.getEffectiveSCEVType(Ty);
1298
1299  PHINode *PN = PHINode::Create(Ty, "lsr.iv", Header->begin());
1300  PN->addIncoming(Rewriter.expandCodeFor(Start, Ty, Preheader->getTerminator()),
1301                  Preheader);
1302
1303  // If the stride is negative, insert a sub instead of an add for the
1304  // increment.
1305  bool isNegative = isNonConstantNegative(Step);
1306  const SCEV *IncAmount = Step;
1307  if (isNegative)
1308    IncAmount = Rewriter.SE.getNegativeSCEV(Step);
1309
1310  // Insert an add instruction right before the terminator corresponding
1311  // to the back-edge or just before the only use. The location is determined
1312  // by the caller and passed in as IVIncInsertPt.
1313  Value *StepV = Rewriter.expandCodeFor(IncAmount, Ty,
1314                                        Preheader->getTerminator());
1315  Instruction *IncV;
1316  if (isNegative) {
1317    IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
1318                                     IVIncInsertPt);
1319  } else {
1320    IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
1321                                     IVIncInsertPt);
1322  }
1323  if (!isa<ConstantInt>(StepV)) ++NumVariable;
1324
1325  PN->addIncoming(IncV, LatchBlock);
1326
1327  ++NumInserted;
1328  return PN;
1329}
1330
1331static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
1332  // We want to emit code for users inside the loop first.  To do this, we
1333  // rearrange BasedUser so that the entries at the end have
1334  // isUseOfPostIncrementedValue = false, because we pop off the end of the
1335  // vector (so we handle them first).
1336  std::partition(UsersToProcess.begin(), UsersToProcess.end(),
1337                 PartitionByIsUseOfPostIncrementedValue);
1338
1339  // Sort this by base, so that things with the same base are handled
1340  // together.  By partitioning first and stable-sorting later, we are
1341  // guaranteed that within each base we will pop off users from within the
1342  // loop before users outside of the loop with a particular base.
1343  //
1344  // We would like to use stable_sort here, but we can't.  The problem is that
1345  // const SCEV *'s don't have a deterministic ordering w.r.t. each other, so
1346  // we don't have anything to do a '<' comparison on.  Because we think the
1347  // number of uses is small, do a horrible bubble sort which just relies on
1348  // ==.
1349  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1350    // Get a base value.
1351    const SCEV *Base = UsersToProcess[i].Base;
1352
1353    // Compact everything with this base to be consecutive with this one.
1354    for (unsigned j = i+1; j != e; ++j) {
1355      if (UsersToProcess[j].Base == Base) {
1356        std::swap(UsersToProcess[i+1], UsersToProcess[j]);
1357        ++i;
1358      }
1359    }
1360  }
1361}
1362
1363/// PrepareToStrengthReduceFully - Prepare to fully strength-reduce
1364/// UsersToProcess, meaning lowering addresses all the way down to direct
1365/// pointer arithmetic.
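/// For example (illustrative, in SCEV add-rec notation), users with bases B0
/// and B1 at stride S each get their own IV:
///
///   iv0 = { CommonExprs + B0 + Imm0, +, S }
///   iv1 = { CommonExprs + B1 + Imm1, +, S }
///
/// where Imm0 and Imm1 are the first Imm values seen for each base; the other
/// users of a base keep only the difference between their Imm and that value.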
1366///
1367void
1368LoopStrengthReduce::PrepareToStrengthReduceFully(
1369                                        std::vector<BasedUser> &UsersToProcess,
1370                                        const SCEV *Stride,
1371                                        const SCEV *CommonExprs,
1372                                        const Loop *L,
1373                                        SCEVExpander &PreheaderRewriter) {
1374  DOUT << "  Fully reducing all users\n";
1375
1376  // Rewrite the UsersToProcess records, creating a separate PHI for each
1377  // unique Base value.
1378  Instruction *IVIncInsertPt = L->getLoopLatch()->getTerminator();
1379  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
1380    // TODO: The uses are grouped by base, but not sorted. We arbitrarily
1381    // pick the first Imm value here to start with, and adjust it for the
1382    // other uses.
1383    const SCEV *Imm = UsersToProcess[i].Imm;
1384    const SCEV *Base = UsersToProcess[i].Base;
1385    const SCEV *Start = SE->getAddExpr(CommonExprs, Base, Imm);
1386    PHINode *Phi = InsertAffinePhi(Start, Stride, IVIncInsertPt, L,
1387                                   PreheaderRewriter);
1388    // Loop over all the users with the same base.
1389    do {
1390      UsersToProcess[i].Base = SE->getIntegerSCEV(0, Stride->getType());
1391      UsersToProcess[i].Imm = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
1392      UsersToProcess[i].Phi = Phi;
1393      assert(UsersToProcess[i].Imm->isLoopInvariant(L) &&
1394             "ShouldUseFullStrengthReductionMode should reject this!");
1395    } while (++i != e && Base == UsersToProcess[i].Base);
1396  }
1397}
1398
1399/// FindIVIncInsertPt - Return the location to insert the increment instruction.
1400/// If the only use is a use of the postinc value (which must be the loop
1401/// termination condition), then insert it just before the use.
1402static Instruction *FindIVIncInsertPt(std::vector<BasedUser> &UsersToProcess,
1403                                      const Loop *L) {
1404  if (UsersToProcess.size() == 1 &&
1405      UsersToProcess[0].isUseOfPostIncrementedValue &&
1406      L->contains(UsersToProcess[0].Inst->getParent()))
1407    return UsersToProcess[0].Inst;
1408  return L->getLoopLatch()->getTerminator();
1409}
1410
1411/// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the
1412/// given users to share.
1413///
1414void
1415LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
1416                                         std::vector<BasedUser> &UsersToProcess,
1417                                         const SCEV *Stride,
1418                                         const SCEV *CommonExprs,
1419                                         Value *CommonBaseV,
1420                                         Instruction *IVIncInsertPt,
1421                                         const Loop *L,
1422                                         SCEVExpander &PreheaderRewriter) {
1423  DOUT << "  Inserting new PHI:\n";
1424
1425  PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
1426                                 Stride, IVIncInsertPt, L,
1427                                 PreheaderRewriter);
1428
1429  // Remember this in case a later stride is a multiple of this one.
1430  IVsByStride[Stride].addIV(Stride, CommonExprs, Phi);
1431
1432  // All the users will share this new IV.
1433  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1434    UsersToProcess[i].Phi = Phi;
1435
1436  DOUT << "    IV=";
1437  DEBUG(WriteAsOperand(*DOUT, Phi, /*PrintType=*/false));
1438  DOUT << "\n";
1439}
1440
1441/// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to
1442/// reuse an induction variable whose stride is a factor of the stride of the
1443/// current induction variable.
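/// For example (illustrative), if an IV stepping by 1 has already been
/// inserted and the current stride is 4, addresses at the larger stride can be
/// rewritten as "base + 4*iv" when the target addressing mode can encode the
/// scale of 4, instead of inserting a second IV that steps by 4.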
1444///
1445void
1446LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride(
1447                                         std::vector<BasedUser> &UsersToProcess,
1448                                         Value *CommonBaseV,
1449                                         const IVExpr &ReuseIV,
1450                                         Instruction *PreInsertPt) {
1451  DOUT << "  Rewriting in terms of existing IV of STRIDE " << *ReuseIV.Stride
1452       << " and BASE " << *ReuseIV.Base << "\n";
1453
1454  // All the users will share the reused IV.
1455  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1456    UsersToProcess[i].Phi = ReuseIV.PHI;
1457
1458  Constant *C = dyn_cast<Constant>(CommonBaseV);
1459  if (C &&
1460      (!C->isNullValue() &&
1461       !fitsInAddressMode(SE->getUnknown(CommonBaseV), CommonBaseV->getType(),
1462                         TLI, false)))
1463    // We want the common base emitted into the preheader! This is just
1464    // using cast as a copy so BitCast (no-op cast) is appropriate
1465    CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
1466                                  "commonbase", PreInsertPt);
1467}
1468
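/// IsImmFoldedIntoAddrMode - Return true if the immediate described by GV
/// and/or Offset appears foldable into the target addressing mode of every
/// pre-increment use in UsersToProcess, as judged by AddressingModeMatcher.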
1469static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset,
1470                                    const Type *AccessTy,
1471                                   std::vector<BasedUser> &UsersToProcess,
1472                                   const TargetLowering *TLI) {
1473  SmallVector<Instruction*, 16> AddrModeInsts;
1474  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1475    if (UsersToProcess[i].isUseOfPostIncrementedValue)
1476      continue;
1477    ExtAddrMode AddrMode =
1478      AddressingModeMatcher::Match(UsersToProcess[i].OperandValToReplace,
1479                                   AccessTy, UsersToProcess[i].Inst,
1480                                   AddrModeInsts, *TLI);
1481    if (GV && GV != AddrMode.BaseGV)
1482      return false;
1483    if (Offset && !AddrMode.BaseOffs)
1484      // FIXME: How to accurately check whether its immediate offset is folded.
1485      return false;
1486    AddrModeInsts.clear();
1487  }
1488  return true;
1489}
1490
1491/// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
1492/// stride of the IV.  All of the users may have different starting values, and this
1493/// may not be the only stride.
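/// In outline (a descriptive summary of the code below): collect and sort the
/// users, optionally sink common immediates back into address uses, choose a
/// strategy (full strength reduction, reuse of a smaller-stride IV, or a new
/// PHI), and then rewrite each user in terms of the chosen IV.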
1494void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV *const &Stride,
1495                                                      IVUsersOfOneStride &Uses,
1496                                                      Loop *L) {
1497  // If all the users are moved to another stride, then there is nothing to do.
1498  if (Uses.Users.empty())
1499    return;
1500
1501  // Keep track of whether every use in UsersToProcess is an address. If they all are,
1502  // we may be able to rewrite the entire collection of them in terms of a
1503  // smaller-stride IV.
1504  bool AllUsesAreAddresses = true;
1505
1506  // Keep track of whether every use of a single stride is outside the loop.  If so,
1507  // we want to be more aggressive about reusing a smaller-stride IV; a
1508  // multiply outside the loop is better than another IV inside.  Well, usually.
1509  bool AllUsesAreOutsideLoop = true;
1510
1511  // Transform our list of users and offsets to a bit more complex table.  In
1512  // this new vector, each 'BasedUser' contains 'Base', the base of the
1513  // strided access, as well as the old information from Uses.  We progressively
1514  // move information from the Base field to the Imm field, until we eventually
1515  // have the full access expression to rewrite the use.
1516  std::vector<BasedUser> UsersToProcess;
1517  const SCEV *CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
1518                                          AllUsesAreOutsideLoop,
1519                                          UsersToProcess);
1520
1521  // Sort the UsersToProcess array so that users with common bases are
1522  // next to each other.
1523  SortUsersToProcess(UsersToProcess);
1524
1525  // If we managed to find some expressions in common, we'll need to carry
1526  // their value in a register and add it in for each use. This will take up
1527  // a register operand, which potentially restricts what stride values are
1528  // valid.
1529  bool HaveCommonExprs = !CommonExprs->isZero();
1530  const Type *ReplacedTy = CommonExprs->getType();
1531
1532  // If all uses are addresses, consider sinking the immediate part of the
1533  // common expression back into the uses if it can fit in their immediate fields.
1534  if (TLI && HaveCommonExprs && AllUsesAreAddresses) {
1535    const SCEV *NewCommon = CommonExprs;
1536    const SCEV *Imm = SE->getIntegerSCEV(0, ReplacedTy);
1537    MoveImmediateValues(TLI, Type::VoidTy, NewCommon, Imm, true, L, SE);
1538    if (!Imm->isZero()) {
1539      bool DoSink = true;
1540
1541      // If the immediate part of the common expression is a GV, check if it's
1542      // possible to fold it into the target addressing mode.
1543      GlobalValue *GV = 0;
1544      if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm))
1545        GV = dyn_cast<GlobalValue>(SU->getValue());
1546      int64_t Offset = 0;
1547      if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
1548        Offset = SC->getValue()->getSExtValue();
1549      if (GV || Offset)
1550        // Pass VoidTy as the AccessTy to be conservative, because
1551        // there could be multiple access types among all the uses.
1552        DoSink = IsImmFoldedIntoAddrMode(GV, Offset, Type::VoidTy,
1553                                         UsersToProcess, TLI);
1554
1555      if (DoSink) {
1556        DOUT << "  Sinking " << *Imm << " back down into uses\n";
1557        for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1558          UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, Imm);
1559        CommonExprs = NewCommon;
1560        HaveCommonExprs = !CommonExprs->isZero();
1561        ++NumImmSunk;
1562      }
1563    }
1564  }
1565
1566  // Now that we know what we need to do, insert the PHI node itself.
1567  //
1568  DOUT << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE "
1569       << *Stride << ":\n"
1570       << "  Common base: " << *CommonExprs << "\n";
1571
1572  SCEVExpander Rewriter(*SE);
1573  SCEVExpander PreheaderRewriter(*SE);
1574
1575  BasicBlock  *Preheader = L->getLoopPreheader();
1576  Instruction *PreInsertPt = Preheader->getTerminator();
1577  BasicBlock *LatchBlock = L->getLoopLatch();
1578  Instruction *IVIncInsertPt = LatchBlock->getTerminator();
1579
1580  Value *CommonBaseV = Constant::getNullValue(ReplacedTy);
1581
1582  const SCEV *RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
1583  IVExpr   ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),
1584                   SE->getIntegerSCEV(0, Type::Int32Ty),
1585                   0);
1586
1587  /// Choose a strength-reduction strategy and prepare for it by creating
1588  /// the necessary PHIs and adjusting the bookkeeping.
1589  if (ShouldUseFullStrengthReductionMode(UsersToProcess, L,
1590                                         AllUsesAreAddresses, Stride)) {
1591    PrepareToStrengthReduceFully(UsersToProcess, Stride, CommonExprs, L,
1592                                 PreheaderRewriter);
1593  } else {
1594    // Emit the initial base value into the loop preheader.
1595    CommonBaseV = PreheaderRewriter.expandCodeFor(CommonExprs, ReplacedTy,
1596                                                  PreInsertPt);
1597
1598    // If all uses are addresses, check if it is possible to reuse an IV.  The
1599    // new IV must have a stride that is a multiple of the old stride; the
1600    // multiple must be a number that can be encoded in the scale field of the
1601    // target addressing mode; and we must have a valid instruction after this
1602    // substitution, including the immediate field, if any.
1603    RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
1604                                    AllUsesAreOutsideLoop,
1605                                    Stride, ReuseIV, ReplacedTy,
1606                                    UsersToProcess);
1607    if (!RewriteFactor->isZero())
1608      PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV,
1609                                               ReuseIV, PreInsertPt);
1610    else {
1611      IVIncInsertPt = FindIVIncInsertPt(UsersToProcess, L);
1612      PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
1613                                        CommonBaseV, IVIncInsertPt,
1614                                        L, PreheaderRewriter);
1615    }
1616  }
1617
1618  // Process all the users now, replacing their strided uses with
1619  // strength-reduced forms.  This outer loop handles all bases, the inner
1620  // loop handles all users of a particular base.
1621  while (!UsersToProcess.empty()) {
1622    const SCEV *Base = UsersToProcess.back().Base;
1623    Instruction *Inst = UsersToProcess.back().Inst;
1624
1625    // Emit the code for Base into the preheader.
1626    Value *BaseV = 0;
1627    if (!Base->isZero()) {
1628      BaseV = PreheaderRewriter.expandCodeFor(Base, 0, PreInsertPt);
1629
1630      DOUT << "  INSERTING code for BASE = " << *Base << ":";
1631      if (BaseV->hasName())
1632        DOUT << " Result value name = %" << BaseV->getNameStr();
1633      DOUT << "\n";
1634
1635      // If BaseV is a non-zero constant, make sure that it gets inserted into
1636      // the preheader, instead of being forward substituted into the uses.  We
1637      // do this by forcing a BitCast (noop cast) to be inserted into the
1638      // preheader in this case.
1639      if (!fitsInAddressMode(Base, getAccessType(Inst), TLI, false) &&
1640          isa<Constant>(BaseV)) {
1641        // We want this constant emitted into the preheader! This is just
1642        // using cast as a copy so BitCast (no-op cast) is appropriate
1643        BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
1644                                PreInsertPt);
1645      }
1646    }
1647
1648    // Emit the code to add the immediate offset to the Phi value, just before
1649    // the instructions that we identified as using this stride and base.
1650    do {
1651      // FIXME: Use emitted users to emit other users.
1652      BasedUser &User = UsersToProcess.back();
1653
1654      DOUT << "    Examining ";
1655      if (User.isUseOfPostIncrementedValue)
1656        DOUT << "postinc";
1657      else
1658        DOUT << "preinc";
1659      DOUT << " use ";
1660      DEBUG(WriteAsOperand(*DOUT, UsersToProcess.back().OperandValToReplace,
1661                           /*PrintType=*/false));
1662      DOUT << " in Inst: " << *(User.Inst);
1663
1664      // If this instruction wants to use the post-incremented value, move it
1665      // after the post-inc and use its value instead of the PHI.
1666      Value *RewriteOp = User.Phi;
1667      if (User.isUseOfPostIncrementedValue) {
1668        RewriteOp = User.Phi->getIncomingValueForBlock(LatchBlock);
1669        // If this user is in the loop, make sure it is the last thing in the
1670        // loop to ensure it is dominated by the increment. In case it's the
1671        // only use of the iv, the increment instruction is already before the
1672        // use.
1673        if (L->contains(User.Inst->getParent()) && User.Inst != IVIncInsertPt)
1674          User.Inst->moveBefore(IVIncInsertPt);
1675      }
1676
1677      const SCEV *RewriteExpr = SE->getUnknown(RewriteOp);
1678
1679      if (SE->getEffectiveSCEVType(RewriteOp->getType()) !=
1680          SE->getEffectiveSCEVType(ReplacedTy)) {
1681        assert(SE->getTypeSizeInBits(RewriteOp->getType()) >
1682               SE->getTypeSizeInBits(ReplacedTy) &&
1683               "Unexpected widening cast!");
1684        RewriteExpr = SE->getTruncateExpr(RewriteExpr, ReplacedTy);
1685      }
1686
1687      // If we had to insert new instructions for RewriteOp, we have to
1688      // consider that they may not have been able to end up immediately
1689      // next to RewriteOp, because non-PHI instructions may never precede
1690      // PHI instructions in a block. In this case, remember where the last
1691      // instruction was inserted so that if we're replacing a different
1692      // PHI node, we can use the later point to expand the final
1693      // RewriteExpr.
1694      Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
1695      if (RewriteOp == User.Phi) NewBasePt = 0;
1696
1697      // Clear the SCEVExpander's expression map so that we are guaranteed
1698      // to have the code emitted where we expect it.
1699      Rewriter.clear();
1700
1701      // If we are reusing the iv, then it must be multiplied by a constant
1702      // factor to take advantage of the addressing mode scale component.
1703      if (!RewriteFactor->isZero()) {
1704        // If we're reusing an IV with a nonzero base (currently this happens
1705        // only when all reuses are outside the loop) subtract that base here.
1706        // The base has been used to initialize the PHI node but we don't want
1707        // it here.
1708        if (!ReuseIV.Base->isZero()) {
1709          const SCEV *typedBase = ReuseIV.Base;
1710          if (SE->getEffectiveSCEVType(RewriteExpr->getType()) !=
1711              SE->getEffectiveSCEVType(ReuseIV.Base->getType())) {
1712            // It's possible the original IV is a larger type than the new IV,
1713            // in which case we have to truncate the Base.  We checked in
1714            // RequiresTypeConversion that this is valid.
1715            assert(SE->getTypeSizeInBits(RewriteExpr->getType()) <
1716                   SE->getTypeSizeInBits(ReuseIV.Base->getType()) &&
1717                   "Unexpected lengthening conversion!");
1718            typedBase = SE->getTruncateExpr(ReuseIV.Base,
1719                                            RewriteExpr->getType());
1720          }
1721          RewriteExpr = SE->getMinusSCEV(RewriteExpr, typedBase);
1722        }
1723
1724        // Multiply old variable, with base removed, by new scale factor.
1725        RewriteExpr = SE->getMulExpr(RewriteFactor,
1726                                     RewriteExpr);
1727
1728        // The common base is emitted in the loop preheader. But since we
1729        // are reusing an IV, it has not been used to initialize the PHI node.
1730        // Add it to the expression used to rewrite the uses.
1731        // When this use is outside the loop, we earlier subtracted the
1732        // common base, and are adding it back here.  Use the same expression
1733        // as before, rather than CommonBaseV, so DAGCombiner will zap it.
1734        if (!CommonExprs->isZero()) {
1735          if (L->contains(User.Inst->getParent()))
1736            RewriteExpr = SE->getAddExpr(RewriteExpr,
1737                                       SE->getUnknown(CommonBaseV));
1738          else
1739            RewriteExpr = SE->getAddExpr(RewriteExpr, CommonExprs);
1740        }
1741      }
1742
1743      // Now that we know what we need to do, insert code before User for the
1744      // immediate and any loop-variant expressions.
1745      if (BaseV)
1746        // Add BaseV to the PHI value if needed.
1747        RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));
1748
1749      User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
1750                                          Rewriter, L, this, *LI,
1751                                          DeadInsts);
1752
1753      // Mark old value we replaced as possibly dead, so that it is eliminated
1754      // if we just replaced the last use of that value.
1755      DeadInsts.push_back(User.OperandValToReplace);
1756
1757      UsersToProcess.pop_back();
1758      ++NumReduced;
1759
1760      // If there are any more users to process with the same base, process them
1761      // now.  We sorted by base above, so we just have to check the last elt.
1762    } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
1763    // TODO: Next, find out which base index is the most common, pull it out.
1764  }
1765
1766  // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
1767  // different starting values, into different PHIs.
1768}
1769
1770/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
1771/// set the IV user and stride information and return true, otherwise return
1772/// false.
1773bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
1774                                       const SCEV *const * &CondStride) {
1775  for (unsigned Stride = 0, e = IU->StrideOrder.size();
1776       Stride != e && !CondUse; ++Stride) {
1777    std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
1778      IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
1779    assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
1780
1781    for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
1782         E = SI->second->Users.end(); UI != E; ++UI)
1783      if (UI->getUser() == Cond) {
1784        // NOTE: we could handle setcc instructions with multiple uses here, but
1785        // InstCombine does it as well for simple uses; it's not clear that this
1786        // occurs often enough in real life to be worth handling.
1787        CondUse = UI;
1788        CondStride = &SI->first;
1789        return true;
1790      }
1791  }
1792  return false;
1793}
1794
1795namespace {
1796  // Constant strides come first and are in turn sorted by their absolute
1797  // values. If absolute values are the same, then positive strides come first.
1798  // e.g.
1799  // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
1800  struct StrideCompare {
1801    const ScalarEvolution *SE;
1802    explicit StrideCompare(const ScalarEvolution *se) : SE(se) {}
1803
1804    bool operator()(const SCEV *const &LHS, const SCEV *const &RHS) {
1805      const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
1806      const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
1807      if (LHSC && RHSC) {
1808        int64_t  LV = LHSC->getValue()->getSExtValue();
1809        int64_t  RV = RHSC->getValue()->getSExtValue();
1810        uint64_t ALV = (LV < 0) ? -LV : LV;
1811        uint64_t ARV = (RV < 0) ? -RV : RV;
1812        if (ALV == ARV) {
1813          if (LV != RV)
1814            return LV > RV;
1815        } else {
1816          return ALV < ARV;
1817        }
1818
1819        // If it's the same value but different type, sort by bit width so
1820        // that we emit larger induction variables before smaller
1821        // ones, letting the smaller be re-written in terms of larger ones.
1822        return SE->getTypeSizeInBits(RHS->getType()) <
1823               SE->getTypeSizeInBits(LHS->getType());
1824      }
1825      return LHSC && !RHSC;
1826    }
1827  };
1828}
1829
1830/// ChangeCompareStride - If a loop termination compare instruction is the
1831/// only use of its stride, and the comparison is against a constant value,
1832/// try to eliminate the stride by moving the compare instruction to another
1833/// stride and changing its constant operand accordingly. e.g.
1834///
1835/// loop:
1836/// ...
1837/// v1 = v1 + 3
1838/// v2 = v2 + 1
1839/// if (v2 < 10) goto loop
1840/// =>
1841/// loop:
1842/// ...
1843/// v1 = v1 + 3
1844/// if (v1 < 30) goto loop
1845ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
1846                                                IVStrideUse* &CondUse,
1847                                              const SCEV *const* &CondStride) {
1848  // If there's only one stride in the loop, there's nothing to do here.
1849  if (IU->StrideOrder.size() < 2)
1850    return Cond;
1851  // If there are other users of the condition's stride, don't bother
1852  // trying to change the condition because the stride will still
1853  // remain.
1854  std::map<const SCEV *, IVUsersOfOneStride *>::iterator I =
1855    IU->IVUsesByStride.find(*CondStride);
1856  if (I == IU->IVUsesByStride.end() ||
1857      I->second->Users.size() != 1)
1858    return Cond;
1859  // Only handle constant strides for now.
1860  const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride);
1861  if (!SC) return Cond;
1862
1863  ICmpInst::Predicate Predicate = Cond->getPredicate();
1864  int64_t CmpSSInt = SC->getValue()->getSExtValue();
1865  unsigned BitWidth = SE->getTypeSizeInBits((*CondStride)->getType());
1866  uint64_t SignBit = 1ULL << (BitWidth-1);
1867  const Type *CmpTy = Cond->getOperand(0)->getType();
1868  const Type *NewCmpTy = NULL;
1869  unsigned TyBits = SE->getTypeSizeInBits(CmpTy);
1870  unsigned NewTyBits = 0;
1871  const SCEV **NewStride = NULL;
1872  Value *NewCmpLHS = NULL;
1873  Value *NewCmpRHS = NULL;
1874  int64_t Scale = 1;
1875  const SCEV *NewOffset = SE->getIntegerSCEV(0, CmpTy);
1876
1877  if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
1878    int64_t CmpVal = C->getValue().getSExtValue();
1879
1880    // Check the stride constant and the comparison constant signs to detect
1881    // overflow.
1882    if ((CmpVal & SignBit) != (CmpSSInt & SignBit))
1883      return Cond;
1884
1885    // Look for a suitable stride / iv as replacement.
1886    for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
1887      std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
1888        IU->IVUsesByStride.find(IU->StrideOrder[i]);
1889      if (!isa<SCEVConstant>(SI->first))
1890        continue;
1891      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
1892      if (SSInt == CmpSSInt ||
1893          abs64(SSInt) < abs64(CmpSSInt) ||
1894          (SSInt % CmpSSInt) != 0)
1895        continue;
1896
1897      Scale = SSInt / CmpSSInt;
1898      int64_t NewCmpVal = CmpVal * Scale;
1899      APInt Mul = APInt(BitWidth*2, CmpVal, true);
1900      Mul = Mul * APInt(BitWidth*2, Scale, true);
1901      // Check for overflow.
1902      if (!Mul.isSignedIntN(BitWidth))
1903        continue;
1904      // Check for overflow in the stride's type too.
1905      if (!Mul.isSignedIntN(SE->getTypeSizeInBits(SI->first->getType())))
1906        continue;
1907
1908      // Watch out for overflow.
1909      if (ICmpInst::isSignedPredicate(Predicate) &&
1910          (CmpVal & SignBit) != (NewCmpVal & SignBit))
1911        continue;
1912
1913      if (NewCmpVal == CmpVal)
1914        continue;
1915      // Pick the best iv to use, trying to avoid a cast.
1916      NewCmpLHS = NULL;
1917      for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
1918             E = SI->second->Users.end(); UI != E; ++UI) {
1919        Value *Op = UI->getOperandValToReplace();
1920
1921        // If the IVStrideUse implies a cast, check for an actual cast which
1922        // can be used to find the original IV expression.
1923        if (SE->getEffectiveSCEVType(Op->getType()) !=
1924            SE->getEffectiveSCEVType(SI->first->getType())) {
1925          CastInst *CI = dyn_cast<CastInst>(Op);
1926          // If it's not a simple cast, it's complicated.
1927          if (!CI)
1928            continue;
1929          // If it's a cast from a type other than the stride type,
1930          // it's complicated.
1931          if (CI->getOperand(0)->getType() != SI->first->getType())
1932            continue;
1933          // Ok, we found the IV expression in the stride's type.
1934          Op = CI->getOperand(0);
1935        }
1936
1937        NewCmpLHS = Op;
1938        if (NewCmpLHS->getType() == CmpTy)
1939          break;
1940      }
1941      if (!NewCmpLHS)
1942        continue;
1943
1944      NewCmpTy = NewCmpLHS->getType();
1945      NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
1946      const Type *NewCmpIntTy = IntegerType::get(NewTyBits);
1947      if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
1948        // Check if it is possible to rewrite it using
1949        // an iv / stride of a smaller integer type.
1950        unsigned Bits = NewTyBits;
1951        if (ICmpInst::isSignedPredicate(Predicate))
1952          --Bits;
1953        uint64_t Mask = (1ULL << Bits) - 1;
1954        if (((uint64_t)NewCmpVal & Mask) != (uint64_t)NewCmpVal)
1955          continue;
1956      }
1957
1958      // Don't rewrite if the use offset is non-constant and the new type is
1959      // of a different size.
1960      // FIXME: too conservative?
1961      if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->getOffset()))
1962        continue;
1963
1964      bool AllUsesAreAddresses = true;
1965      bool AllUsesAreOutsideLoop = true;
1966      std::vector<BasedUser> UsersToProcess;
1967      const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
1968                                              AllUsesAreAddresses,
1969                                              AllUsesAreOutsideLoop,
1970                                              UsersToProcess);
1971      // Avoid rewriting the compare instruction with an iv of new stride
1972      // if it's likely the new stride uses will be rewritten using the
1973      // stride of the compare instruction.
1974      if (AllUsesAreAddresses &&
1975          ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
1976        continue;
1977
1978      // Avoid rewriting the compare instruction with an iv which has
1979      // implicit extension or truncation built into it.
1980      // TODO: This is over-conservative.
1981      if (SE->getTypeSizeInBits(CondUse->getOffset()->getType()) != TyBits)
1982        continue;
1983
1984      // If scale is negative, use swapped predicate unless it's testing
1985      // for equality.
1986      if (Scale < 0 && !Cond->isEquality())
1987        Predicate = ICmpInst::getSwappedPredicate(Predicate);
1988
1989      NewStride = &IU->StrideOrder[i];
1990      if (!isa<PointerType>(NewCmpTy))
1991        NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
1992      else {
1993        Constant *CI = ConstantInt::get(NewCmpIntTy, NewCmpVal);
1994        NewCmpRHS = ConstantExpr::getIntToPtr(CI, NewCmpTy);
1995      }
1996      NewOffset = TyBits == NewTyBits
1997        ? SE->getMulExpr(CondUse->getOffset(),
1998                         SE->getConstant(CmpTy, Scale))
1999        : SE->getConstant(NewCmpIntTy,
2000          cast<SCEVConstant>(CondUse->getOffset())->getValue()
2001            ->getSExtValue()*Scale);
2002      break;
2003    }
2004  }
2005
2006  // Forgo this transformation if the increment happens to be
2007  // unfortunately positioned after the condition, and the condition
2008  // has multiple uses which prevent it from being moved immediately
2009  // before the branch. See
2010  // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll
2011  // for an example of this situation.
2012  if (!Cond->hasOneUse()) {
2013    for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end();
2014         I != E; ++I)
2015      if (I == NewCmpLHS)
2016        return Cond;
2017  }
2018
2019  if (NewCmpRHS) {
2020    // Create a new compare instruction using new stride / iv.
2021    ICmpInst *OldCond = Cond;
2022    // Insert new compare instruction.
2023    Cond = new ICmpInst(OldCond, Predicate, NewCmpLHS, NewCmpRHS,
2024                        L->getHeader()->getName() + ".termcond");
2025
2026    // Remove the old compare instruction. The old indvar is probably dead too.
2027    DeadInsts.push_back(CondUse->getOperandValToReplace());
2028    OldCond->replaceAllUsesWith(Cond);
2029    OldCond->eraseFromParent();
2030
2031    IU->IVUsesByStride[*NewStride]->addUser(NewOffset, Cond, NewCmpLHS);
2032    CondUse = &IU->IVUsesByStride[*NewStride]->Users.back();
2033    CondStride = NewStride;
2034    ++NumEliminated;
2035    Changed = true;
2036  }
2037
2038  return Cond;
2039}
2040
2041/// OptimizeMax - Rewrite the loop's terminating condition if it uses
2042/// a max computation.
2043///
2044/// This is a narrow solution to a specific, but acute, problem. For loops
2045/// like this:
2046///
2047///   i = 0;
2048///   do {
2049///     p[i] = 0.0;
2050///   } while (++i < n);
2051///
2052/// the trip count isn't just 'n', because 'n' might not be positive. And
2053/// unfortunately this can come up even for loops where the user didn't use
2054/// a C do-while loop. For example, seemingly well-behaved top-test loops
2055/// will commonly be lowered like this:
2056///
2057///   if (n > 0) {
2058///     i = 0;
2059///     do {
2060///       p[i] = 0.0;
2061///     } while (++i < n);
2062///   }
2063///
2064/// and then it's possible for subsequent optimization to obscure the if
2065/// test in such a way that indvars can't find it.
2066///
2067/// When indvars can't find the if test in loops like this, it creates a
2068/// max expression, which allows it to give the loop a canonical
2069/// induction variable:
2070///
2071///   i = 0;
2072///   max = n < 1 ? 1 : n;
2073///   do {
2074///     p[i] = 0.0;
2075///   } while (++i != max);
2076///
2077/// Canonical induction variables are necessary because the loop passes
2078/// are designed around them. The most obvious example of this is the
2079/// LoopInfo analysis, which doesn't remember trip count values. It
2080/// expects to be able to rediscover the trip count each time it is
2081/// needed, and it does this using a simple analysis that only succeeds if
2082/// the loop has a canonical induction variable.
2083///
2084/// However, when it comes time to generate code, the maximum operation
2085/// can be quite costly, especially if it's inside of an outer loop.
2086///
2087/// This function solves this problem by detecting loops of this type,
2088/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
2089/// the instructions for the maximum computation.
2090///
2091ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
2092                                          IVStrideUse* &CondUse) {
2093  // Check that the loop matches the pattern we're looking for.
2094  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
2095      Cond->getPredicate() != CmpInst::ICMP_NE)
2096    return Cond;
2097
2098  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
2099  if (!Sel || !Sel->hasOneUse()) return Cond;
2100
2101  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2102  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2103    return Cond;
2104  const SCEV *One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
2105
2106  // Add one to the backedge-taken count to get the trip count.
2107  const SCEV *IterationCount = SE->getAddExpr(BackedgeTakenCount, One);
2108
2109  // Check for a max calculation that matches the pattern.
2110  if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
2111    return Cond;
2112  const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
2113  if (Max != SE->getSCEV(Sel)) return Cond;
2114
2115  // To handle a max with more than two operands, this optimization would
2116  // require additional checking and setup.
2117  if (Max->getNumOperands() != 2)
2118    return Cond;
2119
2120  const SCEV *MaxLHS = Max->getOperand(0);
2121  const SCEV *MaxRHS = Max->getOperand(1);
2122  if (!MaxLHS || MaxLHS != One) return Cond;
2123
2124  // Check the relevant induction variable for conformance to
2125  // the pattern.
2126  const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
2127  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2128  if (!AR || !AR->isAffine() ||
2129      AR->getStart() != One ||
2130      AR->getStepRecurrence(*SE) != One)
2131    return Cond;
2132
2133  assert(AR->getLoop() == L &&
2134         "Loop condition operand is an addrec in a different loop!");
2135
2136  // Check the right operand of the select, and remember it, as it will
2137  // be used in the new comparison instruction.
2138  Value *NewRHS = 0;
2139  if (SE->getSCEV(Sel->getOperand(1)) == MaxRHS)
2140    NewRHS = Sel->getOperand(1);
2141  else if (SE->getSCEV(Sel->getOperand(2)) == MaxRHS)
2142    NewRHS = Sel->getOperand(2);
2143  if (!NewRHS) return Cond;
2144
2145  // Determine the new comparison opcode. It may be signed or unsigned,
2146  // and the original comparison may be either equality or inequality.
2147  CmpInst::Predicate Pred =
2148    isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
2149  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
2150    Pred = CmpInst::getInversePredicate(Pred);
2151
2152  // Ok, everything looks ok to change the condition into an SLT or SGE and
2153  // delete the max calculation.
2154  ICmpInst *NewCond =
2155    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");
2156
2157  // Delete the max calculation instructions.
2158  Cond->replaceAllUsesWith(NewCond);
2159  CondUse->setUser(NewCond);
2160  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
2161  Cond->eraseFromParent();
2162  Sel->eraseFromParent();
2163  if (Cmp->use_empty())
2164    Cmp->eraseFromParent();
2165  return NewCond;
2166}
2167
2168/// OptimizeShadowIV - If the IV is used in an int-to-float cast
2169/// inside the loop, then try to eliminate the cast operation.
2170void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
2171
2172  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2173  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2174    return;
2175
2176  for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e;
2177       ++Stride) {
2178    std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2179      IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
2180    assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
2181    if (!isa<SCEVConstant>(SI->first))
2182      continue;
2183
2184    for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
2185           E = SI->second->Users.end(); UI != E; /* empty */) {
2186      ilist<IVStrideUse>::iterator CandidateUI = UI;
2187      ++UI;
2188      Instruction *ShadowUse = CandidateUI->getUser();
2189      const Type *DestTy = NULL;
2190
2191      /* If the shadow use is an int->float cast, then insert a second IV
2192         to eliminate this cast.
2193
2194           for (unsigned i = 0; i < n; ++i)
2195             foo((double)i);
2196
2197         is transformed into
2198
2199           double d = 0.0;
2200           for (unsigned i = 0; i < n; ++i, ++d)
2201             foo(d);
2202      */
2203      if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
2204        DestTy = UCast->getDestTy();
2205      else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
2206        DestTy = SCast->getDestTy();
2207      if (!DestTy) continue;
2208
2209      if (TLI) {
2210        // If target does not support DestTy natively then do not apply
2211        // this transformation.
2212        EVT DVT = TLI->getValueType(DestTy);
2213        if (!TLI->isTypeLegal(DVT)) continue;
2214      }
2215
2216      PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
2217      if (!PH) continue;
2218      if (PH->getNumIncomingValues() != 2) continue;
2219
2220      const Type *SrcTy = PH->getType();
2221      int Mantissa = DestTy->getFPMantissaWidth();
2222      if (Mantissa == -1) continue;
2223      if ((int)SE->getTypeSizeInBits(SrcTy) > Mantissa)
2224        continue;
2225
2226      unsigned Entry, Latch;
2227      if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
2228        Entry = 0;
2229        Latch = 1;
2230      } else {
2231        Entry = 1;
2232        Latch = 0;
2233      }
2234
2235      ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
2236      if (!Init) continue;
2237      Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());
2238
2239      BinaryOperator *Incr =
2240        dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
2241      if (!Incr) continue;
2242      if (Incr->getOpcode() != Instruction::Add
2243          && Incr->getOpcode() != Instruction::Sub)
2244        continue;
2245
2246      /* Initialize new IV, double d = 0.0 in above example. */
2247      ConstantInt *C = NULL;
2248      if (Incr->getOperand(0) == PH)
2249        C = dyn_cast<ConstantInt>(Incr->getOperand(1));
2250      else if (Incr->getOperand(1) == PH)
2251        C = dyn_cast<ConstantInt>(Incr->getOperand(0));
2252      else
2253        continue;
2254
2255      if (!C) continue;
2256
2257      /* Add new PHINode. */
2258      PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);
2259
2260      /* create new increment. '++d' in above example. */
2261      Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
2262      BinaryOperator *NewIncr =
2263        BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
2264                                 Instruction::FAdd : Instruction::FSub,
2265                               NewPH, CFP, "IV.S.next.", Incr);
2266
2267      NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
2268      NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
2269
2270      /* Remove cast operation */
2271      ShadowUse->replaceAllUsesWith(NewPH);
2272      ShadowUse->eraseFromParent();
2273      NumShadow++;
2274      break;
2275    }
2276  }
2277}
2278
2279/// OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
2280/// uses in the loop, look to see if we can eliminate some, in favor of using
2281/// common indvars for the different uses.
2282void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
2283  // TODO: implement optzns here.
2284
2285  OptimizeShadowIV(L);
2286}
2287
2288/// OptimizeLoopTermCond - Change loop terminating condition to use the
2289/// postinc iv when possible.
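/// Conceptually (an illustrative sketch): instead of
///
///   %exitcond = icmp ne %iv, %n
///   %iv.next = add %iv, 1
///
/// the exit test is expressed in terms of the post-incremented value %iv.next,
/// with the use's offset adjusted by one stride to compensate, so the pre- and
/// post-increment values need not both stay live across the backedge.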
2290void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
2291  // Finally, get the terminating condition for the loop if possible.  If we
2292  // can, we want to change it to use a post-incremented version of its
2293  // induction variable, to allow coalescing the live ranges for the IV into
2294  // one register value.
2295  BasicBlock *LatchBlock = L->getLoopLatch();
2296  BasicBlock *ExitingBlock = L->getExitingBlock();
2297  LLVMContext &Context = LatchBlock->getContext();
2298
2299  if (!ExitingBlock)
2300    // Multiple exits, just look at the exit in the latch block if there is one.
2301    ExitingBlock = LatchBlock;
2302  BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2303  if (!TermBr)
2304    return;
2305  if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
2306    return;
2307
2308  // Search IVUsesByStride to find Cond's IVUse if there is one.
2309  IVStrideUse *CondUse = 0;
2310  const SCEV *const *CondStride = 0;
2311  ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2312  if (!FindIVUserForCond(Cond, CondUse, CondStride))
2313    return; // setcc doesn't use the IV.
2314
2315  if (ExitingBlock != LatchBlock) {
2316    if (!Cond->hasOneUse())
2317      // See below, we don't want the condition to be cloned.
2318      return;
2319
2320    // If exiting block is the latch block, we know it's safe and profitable to
2321    // transform the icmp to use post-inc iv. Otherwise do so only if it would
2322    // not reuse another iv and its iv would be reused by other uses. We are
2323    // optimizing for the case where the icmp is the only use of the iv.
2324    IVUsersOfOneStride &StrideUses = *IU->IVUsesByStride[*CondStride];
2325    for (ilist<IVStrideUse>::iterator I = StrideUses.Users.begin(),
2326         E = StrideUses.Users.end(); I != E; ++I) {
2327      if (I->getUser() == Cond)
2328        continue;
2329      if (!I->isUseOfPostIncrementedValue())
2330        return;
2331    }
2332
2333    // FIXME: This is expensive, and worse still ChangeCompareStride does a
2334    // similar check. Can we perform all the icmp related transformations after
2335    // StrengthReduceStridedIVUsers?
2336    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride)) {
2337      int64_t SInt = SC->getValue()->getSExtValue();
2338      for (unsigned NewStride = 0, ee = IU->StrideOrder.size(); NewStride != ee;
2339           ++NewStride) {
2340        std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2341          IU->IVUsesByStride.find(IU->StrideOrder[NewStride]);
2342        if (!isa<SCEVConstant>(SI->first) || SI->first == *CondStride)
2343          continue;
2344        int64_t SSInt =
2345          cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
2346        if (SSInt == SInt)
2347          return; // This can definitely be reused.
2348        if (unsigned(abs64(SSInt)) < SInt || (SSInt % SInt) != 0)
2349          continue;
2350        int64_t Scale = SSInt / SInt;
2351        bool AllUsesAreAddresses = true;
2352        bool AllUsesAreOutsideLoop = true;
2353        std::vector<BasedUser> UsersToProcess;
2354        const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
2355                                                AllUsesAreAddresses,
2356                                                AllUsesAreOutsideLoop,
2357                                                UsersToProcess);
2358        // Avoid rewriting the compare instruction with an iv of new stride
2359        // if it's likely the new stride uses will be rewritten using the
2360        // stride of the compare instruction.
2361        if (AllUsesAreAddresses &&
2362            ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
2363          return;
2364      }
2365    }
2366
2367    StrideNoReuse.insert(*CondStride);
2368  }
2369
2370  // If the trip count is computed in terms of a max (due to ScalarEvolution
2371  // being unable to find a sufficient guard, for example), change the loop
2372  // comparison to use SLT or ULT instead of NE.
2373  Cond = OptimizeMax(L, Cond, CondUse);
2374
2375  // If possible, change stride and operands of the compare instruction to
2376  // eliminate one stride.
2377  if (ExitingBlock == LatchBlock)
2378    Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);
2379
2380  // It's possible for the setcc instruction to be anywhere in the loop, and
2381  // possible for it to have multiple users.  If it is not immediately before
2382  // the latch block branch, move it.
2383  if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
2384    if (Cond->hasOneUse()) {   // Condition has a single use, just move it.
2385      Cond->moveBefore(TermBr);
2386    } else {
2387      // Otherwise, clone the terminating condition and insert it into the loop end.
2388      Cond = cast<ICmpInst>(Cond->clone(Context));
2389      Cond->setName(L->getHeader()->getName() + ".termcond");
2390      LatchBlock->getInstList().insert(TermBr, Cond);
2391
2392      // Clone the IVUse, as the old use still exists!
2393      IU->IVUsesByStride[*CondStride]->addUser(CondUse->getOffset(), Cond,
2394                                             CondUse->getOperandValToReplace());
2395      CondUse = &IU->IVUsesByStride[*CondStride]->Users.back();
2396    }
2397  }
2398
2399  // If we get to here, we know that we can transform the setcc instruction to
2400  // use the post-incremented version of the IV, allowing us to coalesce the
2401  // live ranges for the IV correctly.
2402  CondUse->setOffset(SE->getMinusSCEV(CondUse->getOffset(), *CondStride));
2403  CondUse->setIsUseOfPostIncrementedValue(true);
2404  Changed = true;
2405
2406  ++NumLoopCond;
2407}
2408
2409/// OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for deciding
2410/// when to exit the loop is used only for that purpose, try to rearrange things
2411/// so it counts down to a test against zero.
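/// For example (an illustrative source-level sketch):
///
///   for (i = start; i != end; ++i)         for (j = end - start; j != 0; --j)
///     ...                            =>      ...
///
/// Testing only for equality keeps the rewrite safe even if end - start wraps.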
2412void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
2413
2414  // If the number of times the loop is executed isn't computable, give up.
2415  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2416  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2417    return;
2418
2419  // Get the terminating condition for the loop if possible (this isn't
2420  // necessarily in the latch, or a block that's a predecessor of the header).
2421  if (!L->getExitBlock())
2422    return; // More than one loop exit block.
2423
2424  // Okay, there is one exit block.  Try to find the condition that causes the
2425  // loop to be exited.
2426  BasicBlock *ExitingBlock = L->getExitingBlock();
2427  if (!ExitingBlock)
2428    return; // More than one block exiting!
2429
2430  // Okay, we've computed the exiting block.  See what condition causes us to
2431  // exit.
2432  //
2433  // FIXME: we should be able to handle switch instructions (with a single exit)
2434  BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2435  if (TermBr == 0) return;
2436  assert(TermBr->isConditional() && "If unconditional, it can't be in loop!");
2437  if (!isa<ICmpInst>(TermBr->getCondition()))
2438    return;
2439  ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2440
2441  // Handle only tests for equality for the moment, and only stride 1.
2442  if (Cond->getPredicate() != CmpInst::ICMP_EQ)
2443    return;
2444  const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
2445  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2446  const SCEV *One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
2447  if (!AR || !AR->isAffine() || AR->getStepRecurrence(*SE) != One)
2448    return;
2449  // If the RHS of the comparison is defined inside the loop, the rewrite
2450  // cannot be done.
2451  if (Instruction *CR = dyn_cast<Instruction>(Cond->getOperand(1)))
2452    if (L->contains(CR->getParent()))
2453      return;
2454
2455  // Make sure the IV is only used for counting.  Value may be preinc or
2456  // postinc; 2 uses in either case.
2457  if (!Cond->getOperand(0)->hasNUses(2))
2458    return;
2459  PHINode *phi = dyn_cast<PHINode>(Cond->getOperand(0));
2460  Instruction *incr;
2461  if (phi && phi->getParent()==L->getHeader()) {
2462    // Value tested is preinc.  Find the increment.
2463    // A CmpInst is not a BinaryOperator; we depend on this.
2464    Instruction::use_iterator UI = phi->use_begin();
2465    incr = dyn_cast<BinaryOperator>(UI);
2466    if (!incr)
2467      incr = dyn_cast<BinaryOperator>(++UI);
2468    // 1 use for postinc value, the phi.  Unnecessarily conservative?
2469    if (!incr || !incr->hasOneUse() || incr->getOpcode()!=Instruction::Add)
2470      return;
2471  } else {
2472    // Value tested is postinc.  Find the phi node.
2473    incr = dyn_cast<BinaryOperator>(Cond->getOperand(0));
2474    if (!incr || incr->getOpcode()!=Instruction::Add)
2475      return;
2476
2477    Instruction::use_iterator UI = Cond->getOperand(0)->use_begin();
2478    phi = dyn_cast<PHINode>(UI);
2479    if (!phi)
2480      phi = dyn_cast<PHINode>(++UI);
2481    // 1 use for preinc value, the increment.
2482    if (!phi || phi->getParent()!=L->getHeader() || !phi->hasOneUse())
2483      return;
2484  }
2485
2486  // Replace the increment with a decrement.
2487  BinaryOperator *decr =
2488    BinaryOperator::Create(Instruction::Sub, incr->getOperand(0),
2489                           incr->getOperand(1), "tmp", incr);
2490  incr->replaceAllUsesWith(decr);
2491  incr->eraseFromParent();
2492
2493  // Substitute endval-startval for the original startval, and 0 for the
2494  // original endval.  Since we're only testing for equality this is OK even
2495  // if the computation wraps around.
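  // In effect (a C-level sketch of the intended result, not literal IR):
  //   for (i = start; i != end; ++i)  ==>  for (j = end-start; j != 0; --j)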
2496  BasicBlock  *Preheader = L->getLoopPreheader();
2497  Instruction *PreInsertPt = Preheader->getTerminator();
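  // Pick the incoming value that comes from outside the loop (the preheader
  // edge); that is the IV's start value.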
2498  int inBlock = L->contains(phi->getIncomingBlock(0)) ? 1 : 0;
2499  Value *startVal = phi->getIncomingValue(inBlock);
2500  Value *endVal = Cond->getOperand(1);
2501  // FIXME: Check for the case where both startVal and endVal are constants.
2502  Constant* Zero = ConstantInt::get(Cond->getOperand(1)->getType(), 0);
2503  BinaryOperator *NewStartVal =
2504    BinaryOperator::Create(Instruction::Sub, endVal, startVal,
2505                           "tmp", PreInsertPt);
2506  phi->setIncomingValue(inBlock, NewStartVal);
2507  Cond->setOperand(1, Zero);
2508
2509  Changed = true;
2510}
2511
2512bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
2513
2514  IU = &getAnalysis<IVUsers>();
2515  LI = &getAnalysis<LoopInfo>();
2516  DT = &getAnalysis<DominatorTree>();
2517  SE = &getAnalysis<ScalarEvolution>();
2518  Changed = false;
2519
2520  if (!IU->IVUsesByStride.empty()) {
2521    DEBUG(errs() << "\nLSR on \"" << L->getHeader()->getParent()->getName()
2522          << "\" ";
2523          L->dump());
2524
2525    // Sort the StrideOrder so we process larger strides first.
2526    std::stable_sort(IU->StrideOrder.begin(), IU->StrideOrder.end(),
2527                     StrideCompare(SE));
2528
2529    // Optimize induction variables.  Some indvar uses can be transformed to use
2530    // strides that will be needed for other purposes.  A common example of this
2531    // is the exit test for the loop, which can often be rewritten to use the
2532    // computation of some other indvar to decide when to terminate the loop.
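    // (For instance, an exit test "i != n" can sometimes be re-expressed in
    // terms of another IV that is needed anyway, allowing i to be eliminated.)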
2533    OptimizeIndvars(L);
2534
2535    // Change loop terminating condition to use the postinc iv when possible
2536    // and optimize loop terminating compare. FIXME: Move this after
2537    // StrengthReduceStridedIVUsers?
2538    OptimizeLoopTermCond(L);
2539
2540    // FIXME: We can shrink overlarge IV's here.  e.g. if the code has
2541    // computation in i64 values and the target doesn't support i64, demote
2542    // the computation to 32-bit if safe.
2543
2544    // FIXME: Attempt to reuse values across multiple IV's.  In particular, we
2545    // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
2546    // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
2547    // Need to be careful that IV's are all the same type.  Only works for
2548    // intptr_t indvars.
2549
2550    // IVsByStride keeps IVs for one particular loop.
2551    assert(IVsByStride.empty() && "Stale entries in IVsByStride?");
2552
2553    // Note: this processes each stride/type pair individually.  All users
2554    // passed into StrengthReduceStridedIVUsers have the same type AND stride.
2555    // Also, note that we iterate over IVUsesByStride indirectly by using
2556    // StrideOrder. This extra layer of indirection makes the ordering of
2557    // strides deterministic - not dependent on map order.
2558    for (unsigned Stride = 0, e = IU->StrideOrder.size();
2559         Stride != e; ++Stride) {
2560      std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2561        IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
2562      assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
2563      // FIXME: Generalize to non-affine IV's.
2564      if (!SI->first->isLoopInvariant(L))
2565        continue;
2566      StrengthReduceStridedIVUsers(SI->first, *SI->second, L);
2567    }
2568  }
2569
2570  // After all sharing is done, see if we can adjust the loop to test against
2571  // zero instead of counting up to a maximum.  This is usually faster.
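  // (On most targets a compare against zero can reuse the condition flags
  // already produced by the decrement itself, saving a separate compare.)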
2572  OptimizeLoopCountIV(L);
2573
2574  // We're done analyzing this loop; release all the state we built up for it.
2575  IVsByStride.clear();
2576  StrideNoReuse.clear();
2577
2578  // Clean up after ourselves
2579  if (!DeadInsts.empty())
2580    DeleteTriviallyDeadInstructions();
2581
2582  // At this point, it is worth checking to see if any recurrence PHIs are also
2583  // dead, so that we can remove them as well.
2584  DeleteDeadPHIs(L->getHeader());
2585
2586  return Changed;
2587}
2588