ScalarEvolution.cpp revision 07ad19b509530b43e6a9acc5c1285cb560dd7198
//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
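//
// For example (an illustrative sketch, not part of the original comment), an
// induction variable such as:
//
//   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
//   %i.next = add i32 %i, 1
//
// is represented as the polynomial recurrence {0,+,1}<%loop>: it starts at 0
// and advances by 1 on each iteration of %loop.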
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeID(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
  new (S) SCEVConstant(ID, V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(
    ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L->getHeader()))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() + ">";
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      llvm_unreachable("Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
462/// results from this routine.  In other words, we don't want the results of
463/// this to depend on where the addresses of various SCEV objects happened to
464/// land in memory.
465///
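/// For example (an illustrative sketch, not part of the original comment),
/// an operand list such as {%x, 5, %x} would come back as {5, %x, %x}: the
/// constant sorts first because scConstant is the smallest SCEVType, and the
/// duplicate %x values end up adjacent so callers can detect them with a
/// simple linear scan.
///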
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula requires
  // a division step, whereas this formula only requires multiplies and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
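  //
  // As an illustrative worked example (not part of the original comment),
  // take K = 3 and W = 8.  Then K! = 6 = 2^1 * 3, so T = 1 and the odd part
  // of K! is 3.  The multiplicative inverse of 3 modulo 2^8 is 171, because
  // 3 * 171 = 513 = 2 * 256 + 1.  For It = 5, the product 5 * 4 * 3 = 60 is
  // computed at width W + T = 9 bits; shifting right by T gives 30, and
  // 30 * 171 = 5130 = 10 (mod 256), which is indeed BC(5, 3) = 10.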

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
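/// For instance (an illustrative example, not part of the original comment),
/// the quadratic recurrence {0,+,1,+,2} evaluates at iteration It to
/// 0*BC(It, 0) + 1*BC(It, 1) + 2*BC(It, 2) = It + It*(It - 1) = It^2.
///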
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
  new (S) SCEVTruncateExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedOverflow())
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) &&
              (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) ||
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
  new (S) SCEVZeroExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedOverflow())
        return getAddRecExpr(getSignExtendExpr(Start, Ty),
                             getSignExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          const SCEV *UMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          Add = getAddExpr(Start, UMul);
          OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                      getSignedRange(Step).getSignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
  new (S) SCEVSignExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
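/// For example (an illustrative note, not part of the original comment), any
/// extension of an i8 value 0xFF to i16 may yield either 0x00FF or 0xFFFF;
/// this routine simply picks whichever form (zext or sext) folds best, so
/// callers must not rely on the value of the high bits.
///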
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV *> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
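/// For example (an illustrative sketch, not part of the original comment),
/// getAddExpr on the operand list {3, %x, 5} folds the two constants and
/// returns the canonical expression (8 + %x).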
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV *Two = getIntegerSCEV(2, Ty);
      const SCEV *Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile to
1260      // re-generate the operands list. Group the operands by constant scale,
1261      // to avoid multiplying by the same constant scale multiple times.
1262      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1263      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
1264           E = NewOps.end(); I != E; ++I)
1265        MulOpLists[M.find(*I)->second].push_back(*I);
1266      // Re-generate the operands list.
1267      Ops.clear();
1268      if (AccumulatedConstant != 0)
1269        Ops.push_back(getConstant(AccumulatedConstant));
1270      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1271           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1272        if (I->first != 0)
1273          Ops.push_back(getMulExpr(getConstant(I->first),
1274                                   getAddExpr(I->second)));
1275      if (Ops.empty())
1276        return getIntegerSCEV(0, Ty);
1277      if (Ops.size() == 1)
1278        return Ops[0];
1279      return getAddExpr(Ops);
1280    }
1281  }
1282
1283  // If we are adding something to a multiply expression, make sure that the
1284  // added value is not already an operand of the multiply.  If it is, merge
1285  // it into the multiply.
1286  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1287    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1288    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1289      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1290      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1291        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
1292          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
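          // Note that getOperand(MulOp == 0) selects the *other* operand of a
          // two-operand multiply (operand 1 when MulOp is 0, operand 0
          // otherwise); multiplies with more operands are rebuilt below.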
1293          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1294          if (Mul->getNumOperands() != 2) {
1295            // If the multiply has more than two operands, we must get the
1296            // Y*Z term.
1297            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
1298            MulOps.erase(MulOps.begin()+MulOp);
1299            InnerMul = getMulExpr(MulOps);
1300          }
1301          const SCEV *One = getIntegerSCEV(1, Ty);
1302          const SCEV *AddOne = getAddExpr(InnerMul, One);
1303          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
1304          if (Ops.size() == 2) return OuterMul;
1305          if (AddOp < Idx) {
1306            Ops.erase(Ops.begin()+AddOp);
1307            Ops.erase(Ops.begin()+Idx-1);
1308          } else {
1309            Ops.erase(Ops.begin()+Idx);
1310            Ops.erase(Ops.begin()+AddOp-1);
1311          }
1312          Ops.push_back(OuterMul);
1313          return getAddExpr(Ops);
1314        }
1315
1316      // Check this multiply against other multiplies being added together.
1317      for (unsigned OtherMulIdx = Idx+1;
1318           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1319           ++OtherMulIdx) {
1320        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1321        // If MulOp occurs in OtherMul, we can fold the two multiplies
1322        // together.
1323        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1324             OMulOp != e; ++OMulOp)
1325          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1326            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1327            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1328            if (Mul->getNumOperands() != 2) {
1329              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1330                                                  Mul->op_end());
1331              MulOps.erase(MulOps.begin()+MulOp);
1332              InnerMul1 = getMulExpr(MulOps);
1333            }
1334            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1335            if (OtherMul->getNumOperands() != 2) {
1336              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1337                                                  OtherMul->op_end());
1338              MulOps.erase(MulOps.begin()+OMulOp);
1339              InnerMul2 = getMulExpr(MulOps);
1340            }
1341            const SCEV *InnerMulSum = getAddExpr(InnerMul1, InnerMul2);
1342            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1343            if (Ops.size() == 2) return OuterMul;
1344            Ops.erase(Ops.begin()+Idx);
1345            Ops.erase(Ops.begin()+OtherMulIdx-1);
1346            Ops.push_back(OuterMul);
1347            return getAddExpr(Ops);
1348          }
1349      }
1350    }
1351  }
1352
1353  // If there are any add recurrences in the operands list, see if any other
1354  // added values are loop invariant.  If so, we can fold them into the
1355  // recurrence.
1356  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1357    ++Idx;
1358
1359  // Scan over all recurrences, trying to fold loop invariants into them.
1360  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1361    // Scan all of the other operands to this add and add them to the vector if
1362    // they are loop invariant w.r.t. the recurrence.
1363    SmallVector<const SCEV *, 8> LIOps;
1364    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1365    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1366      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1367        LIOps.push_back(Ops[i]);
1368        Ops.erase(Ops.begin()+i);
1369        --i; --e;
1370      }
1371
1372    // If we found some loop invariants, fold them into the recurrence.
1373    if (!LIOps.empty()) {
1374      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
1375      LIOps.push_back(AddRec->getStart());
1376
1377      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1378                                             AddRec->op_end());
1379      AddRecOps[0] = getAddExpr(LIOps);
1380
1381      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
1382      // If all of the other operands were loop invariant, we are done.
1383      if (Ops.size() == 1) return NewRec;
1384
1385      // Otherwise, add the folded AddRec to the non-loop-invariant parts.
1386      for (unsigned i = 0;; ++i)
1387        if (Ops[i] == AddRec) {
1388          Ops[i] = NewRec;
1389          break;
1390        }
1391      return getAddExpr(Ops);
1392    }
1393
1394    // Okay, if there weren't any loop invariants to be folded, check to see if
1395    // there are multiple AddRec's with the same loop induction variable being
1396    // added together.  If so, we can fold them.
1397    for (unsigned OtherIdx = Idx+1;
1398         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1399      if (OtherIdx != Idx) {
1400        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1401        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1402          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
1403          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
1404                                              AddRec->op_end());
1405          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
1406            if (i >= NewOps.size()) {
1407              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
1408                            OtherAddRec->op_end());
1409              break;
1410            }
1411            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
1412          }
1413          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
1414
1415          if (Ops.size() == 2) return NewAddRec;
1416
1417          Ops.erase(Ops.begin()+Idx);
1418          Ops.erase(Ops.begin()+OtherIdx-1);
1419          Ops.push_back(NewAddRec);
1420          return getAddExpr(Ops);
1421        }
1422      }
1423
1424    // Otherwise, we couldn't fold anything into this recurrence.  Move on
1425    // to the next one.
1426  }
1427
1428  // Okay, it looks like we really DO need an add expr.  Check to see if we
1429  // already have one, otherwise create a new one.
1430  FoldingSetNodeID ID;
1431  ID.AddInteger(scAddExpr);
1432  ID.AddInteger(Ops.size());
1433  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1434    ID.AddPointer(Ops[i]);
1435  void *IP = 0;
1436  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1437  SCEV *S = SCEVAllocator.Allocate<SCEVAddExpr>();
1438  new (S) SCEVAddExpr(ID, Ops);
1439  UniqueSCEVs.InsertNode(S, IP);
1440  return S;
1441}
1442
1443
1444/// getMulExpr - Get a canonical multiply expression, or something simpler if
1445/// possible.
1446const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) {
1447  assert(!Ops.empty() && "Cannot get empty mul!");
1448#ifndef NDEBUG
1449  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1450    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1451           getEffectiveSCEVType(Ops[0]->getType()) &&
1452           "SCEVMulExpr operand types don't match!");
1453#endif
1454
1455  // Sort by complexity; this groups all similar expression types together.
1456  GroupByComplexity(Ops, LI);
1457
1458  // If there are any constants, fold them together.
1459  unsigned Idx = 0;
1460  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1461
1462    // C1*(C2+V) -> C1*C2 + C1*V
1463    if (Ops.size() == 2)
1464      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1465        if (Add->getNumOperands() == 2 &&
1466            isa<SCEVConstant>(Add->getOperand(0)))
1467          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1468                            getMulExpr(LHSC, Add->getOperand(1)));
1469
1470
1471    ++Idx;
1472    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1473      // We found two constants, fold them together!
1474      ConstantInt *Fold = ConstantInt::get(getContext(),
1475                                           LHSC->getValue()->getValue() *
1476                                           RHSC->getValue()->getValue());
1477      Ops[0] = getConstant(Fold);
1478      Ops.erase(Ops.begin()+1);  // Erase the folded element
1479      if (Ops.size() == 1) return Ops[0];
1480      LHSC = cast<SCEVConstant>(Ops[0]);
1481    }
1482
1483    // If we are left with a constant one being multiplied, strip it off.
1484    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1485      Ops.erase(Ops.begin());
1486      --Idx;
1487    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1488      // If we have a multiply of zero, it will always be zero.
1489      return Ops[0];
1490    }
1491  }
1492
1493  // Skip over the add expression until we get to a multiply.
1494  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1495    ++Idx;
1496
1497  if (Ops.size() == 1)
1498    return Ops[0];
1499
1500  // If there are mul operands inline them all into this expression.
1501  if (Idx < Ops.size()) {
1502    bool DeletedMul = false;
1503    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1504      // If we have a mul, expand the mul operands onto the end of the operands
1505      // list.
1506      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
1507      Ops.erase(Ops.begin()+Idx);
1508      DeletedMul = true;
1509    }
1510
1511    // If we deleted at least one mul, we added operands to the end of the list,
1512    // and they are not necessarily sorted.  Recurse to resort and resimplify
1513    // any operands we just acquired.
1514    if (DeletedMul)
1515      return getMulExpr(Ops);
1516  }
1517
1518  // If there are any add recurrences in the operands list, see if any other
1519  // added values are loop invariant.  If so, we can fold them into the
1520  // recurrence.
1521  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1522    ++Idx;
1523
1524  // Scan over all recurrences, trying to fold loop invariants into them.
1525  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1526    // Scan all of the other operands to this mul and add them to the vector if
1527    // they are loop invariant w.r.t. the recurrence.
1528    SmallVector<const SCEV *, 8> LIOps;
1529    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1530    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1531      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1532        LIOps.push_back(Ops[i]);
1533        Ops.erase(Ops.begin()+i);
1534        --i; --e;
1535      }
1536
1537    // If we found some loop invariants, fold them into the recurrence.
1538    if (!LIOps.empty()) {
1539      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
1540      SmallVector<const SCEV *, 4> NewOps;
1541      NewOps.reserve(AddRec->getNumOperands());
1542      if (LIOps.size() == 1) {
1543        const SCEV *Scale = LIOps[0];
1544        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1545          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1546      } else {
1547        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
1548          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
1549          MulOps.push_back(AddRec->getOperand(i));
1550          NewOps.push_back(getMulExpr(MulOps));
1551        }
1552      }
1553
1554      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
1555
1556      // If all of the other operands were loop invariant, we are done.
1557      if (Ops.size() == 1) return NewRec;
1558
1559      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
1560      for (unsigned i = 0;; ++i)
1561        if (Ops[i] == AddRec) {
1562          Ops[i] = NewRec;
1563          break;
1564        }
1565      return getMulExpr(Ops);
1566    }
1567
1568    // Okay, if there weren't any loop invariants to be folded, check to see if
1569    // there are multiple AddRec's with the same loop induction variable being
1570    // multiplied together.  If so, we can fold them.
1571    for (unsigned OtherIdx = Idx+1;
1572         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1573      if (OtherIdx != Idx) {
1574        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1575        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1576          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
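          // The step term follows from differencing successive products:
          // (F+B)*(G+D) - F*G = F*D + G*B + B*D.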
1577          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1578          const SCEV *NewStart = getMulExpr(F->getStart(),
1579                                            G->getStart());
1580          const SCEV *B = F->getStepRecurrence(*this);
1581          const SCEV *D = G->getStepRecurrence(*this);
1582          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
1583                                           getMulExpr(G, B),
1584                                           getMulExpr(B, D));
1585          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
1586                                                F->getLoop());
1587          if (Ops.size() == 2) return NewAddRec;
1588
1589          Ops.erase(Ops.begin()+Idx);
1590          Ops.erase(Ops.begin()+OtherIdx-1);
1591          Ops.push_back(NewAddRec);
1592          return getMulExpr(Ops);
1593        }
1594      }
1595
1596    // Otherwise, we couldn't fold anything into this recurrence.  Move on
1597    // to the next one.
1598  }
1599
1600  // Okay, it looks like we really DO need a mul expr.  Check to see if we
1601  // already have one, otherwise create a new one.
1602  FoldingSetNodeID ID;
1603  ID.AddInteger(scMulExpr);
1604  ID.AddInteger(Ops.size());
1605  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1606    ID.AddPointer(Ops[i]);
1607  void *IP = 0;
1608  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1609  SCEV *S = SCEVAllocator.Allocate<SCEVMulExpr>();
1610  new (S) SCEVMulExpr(ID, Ops);
1611  UniqueSCEVs.InsertNode(S, IP);
1612  return S;
1613}
1614
1615/// getUDivExpr - Get a canonical unsigned division expression, or something
1616/// simpler if possible.
1617const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1618                                         const SCEV *RHS) {
1619  assert(getEffectiveSCEVType(LHS->getType()) ==
1620         getEffectiveSCEVType(RHS->getType()) &&
1621         "SCEVUDivExpr operand types don't match!");
1622
1623  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1624    if (RHSC->getValue()->equalsInt(1))
1625      return LHS;                            // X udiv 1 --> X
1626    if (RHSC->isZero())
1627      return getIntegerSCEV(0, LHS->getType()); // value is undefined
1628
1629    // Determine if the division can be folded into the operands of
1630    // the dividend.
1631    // TODO: Generalize this to non-constants by using known-bits information.
1632    const Type *Ty = LHS->getType();
1633    unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1634    unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1635    // For non-power-of-two values, effectively round the value up to the
1636    // nearest power of two.
1637    if (!RHSC->getValue()->getValue().isPowerOf2())
1638      ++MaxShiftAmt;
1639    const IntegerType *ExtTy =
1640      IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
1641    // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
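    // For example, {0,+,8}/4 --> {0,+,2}: the step divides evenly, and the
    // zero-extension comparison below shows that the addrec cannot wrap.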
1642    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1643      if (const SCEVConstant *Step =
1644            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1645        if (!Step->getValue()->getValue()
1646              .urem(RHSC->getValue()->getValue()) &&
1647            getZeroExtendExpr(AR, ExtTy) ==
1648            getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1649                          getZeroExtendExpr(Step, ExtTy),
1650                          AR->getLoop())) {
1651          SmallVector<const SCEV *, 4> Operands;
1652          for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1653            Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1654          return getAddRecExpr(Operands, AR->getLoop());
1655        }
1656    // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1657    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1658      SmallVector<const SCEV *, 4> Operands;
1659      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1660        Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1661      if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1662        // Find an operand that's safely divisible.
1663        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1664          const SCEV *Op = M->getOperand(i);
1665          const SCEV *Div = getUDivExpr(Op, RHSC);
1666          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1667            const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
1668            Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
1669                                                    MOperands.end());
1670            Operands[i] = Div;
1671            return getMulExpr(Operands);
1672          }
1673        }
1674    }
1675    // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1676    if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
1677      SmallVector<const SCEV *, 4> Operands;
1678      for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1679        Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1680      if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1681        Operands.clear();
1682        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1683          const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1684          if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1685            break;
1686          Operands.push_back(Op);
1687        }
1688        if (Operands.size() == A->getNumOperands())
1689          return getAddExpr(Operands);
1690      }
1691    }
1692
1693    // Fold if both operands are constant.
1694    if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1695      Constant *LHSCV = LHSC->getValue();
1696      Constant *RHSCV = RHSC->getValue();
1697      return getConstant(cast<ConstantInt>(getContext().getConstantExprUDiv(LHSCV,
1698                                                                 RHSCV)));
1699    }
1700  }
1701
1702  FoldingSetNodeID ID;
1703  ID.AddInteger(scUDivExpr);
1704  ID.AddPointer(LHS);
1705  ID.AddPointer(RHS);
1706  void *IP = 0;
1707  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1708  SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
1709  new (S) SCEVUDivExpr(ID, LHS, RHS);
1710  UniqueSCEVs.InsertNode(S, IP);
1711  return S;
1712}
1713
1714
1715/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1716/// Simplify the expression as much as possible.
1717const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
1718                                           const SCEV *Step, const Loop *L) {
1719  SmallVector<const SCEV *, 4> Operands;
1720  Operands.push_back(Start);
1721  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1722    if (StepChrec->getLoop() == L) {
1723      Operands.insert(Operands.end(), StepChrec->op_begin(),
1724                      StepChrec->op_end());
1725      return getAddRecExpr(Operands, L);
1726    }
1727
1728  Operands.push_back(Step);
1729  return getAddRecExpr(Operands, L);
1730}
1731
1732/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1733/// Simplify the expression as much as possible.
1734const SCEV *
1735ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
1736                               const Loop *L) {
1737  if (Operands.size() == 1) return Operands[0];
1738#ifndef NDEBUG
1739  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1740    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1741           getEffectiveSCEVType(Operands[0]->getType()) &&
1742           "SCEVAddRecExpr operand types don't match!");
1743#endif
1744
1745  if (Operands.back()->isZero()) {
1746    Operands.pop_back();
1747    return getAddRecExpr(Operands, L);             // {X,+,0}  -->  X
1748  }
1749
1750  // Canonicalize nested AddRecs by nesting them in order of loop depth.
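  // For example, {{A,+,B}<Inner>,+,Step}<Outer>, where Inner is nested more
  // deeply than Outer, becomes {{A,+,Step}<Outer>,+,B}<Inner> when the
  // loop-invariance checks below succeed.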
1751  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
1752    const Loop *NestedLoop = NestedAR->getLoop();
1753    if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
1754      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
1755                                                  NestedAR->op_end());
1756      Operands[0] = NestedAR->getStart();
1757      // AddRecs require their operands be loop-invariant with respect to their
1758      // loops. Don't perform this transformation if it would break this
1759      // requirement.
1760      bool AllInvariant = true;
1761      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1762        if (!Operands[i]->isLoopInvariant(L)) {
1763          AllInvariant = false;
1764          break;
1765        }
1766      if (AllInvariant) {
1767        NestedOperands[0] = getAddRecExpr(Operands, L);
1768        AllInvariant = true;
1769        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
1770          if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
1771            AllInvariant = false;
1772            break;
1773          }
1774        if (AllInvariant)
1775          // Ok, both add recurrences are valid after the transformation.
1776          return getAddRecExpr(NestedOperands, NestedLoop);
1777      }
1778      // Reset Operands to its original state.
1779      Operands[0] = NestedAR;
1780    }
1781  }
1782
1783  FoldingSetNodeID ID;
1784  ID.AddInteger(scAddRecExpr);
1785  ID.AddInteger(Operands.size());
1786  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1787    ID.AddPointer(Operands[i]);
1788  ID.AddPointer(L);
1789  void *IP = 0;
1790  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1791  SCEV *S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
1792  new (S) SCEVAddRecExpr(ID, Operands, L);
1793  UniqueSCEVs.InsertNode(S, IP);
1794  return S;
1795}
1796
1797const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
1798                                         const SCEV *RHS) {
1799  SmallVector<const SCEV *, 2> Ops;
1800  Ops.push_back(LHS);
1801  Ops.push_back(RHS);
1802  return getSMaxExpr(Ops);
1803}
1804
1805const SCEV *
1806ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1807  assert(!Ops.empty() && "Cannot get empty smax!");
1808  if (Ops.size() == 1) return Ops[0];
1809#ifndef NDEBUG
1810  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1811    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1812           getEffectiveSCEVType(Ops[0]->getType()) &&
1813           "SCEVSMaxExpr operand types don't match!");
1814#endif
1815
1816  // Sort by complexity; this groups all similar expression types together.
1817  GroupByComplexity(Ops, LI);
1818
1819  // If there are any constants, fold them together.
1820  unsigned Idx = 0;
1821  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1822    ++Idx;
1823    assert(Idx < Ops.size());
1824    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1825      // We found two constants, fold them together!
1826      ConstantInt *Fold = ConstantInt::get(getContext(),
1827                              APIntOps::smax(LHSC->getValue()->getValue(),
1828                                             RHSC->getValue()->getValue()));
1829      Ops[0] = getConstant(Fold);
1830      Ops.erase(Ops.begin()+1);  // Erase the folded element
1831      if (Ops.size() == 1) return Ops[0];
1832      LHSC = cast<SCEVConstant>(Ops[0]);
1833    }
1834
1835    // If we are left with a constant minimum-int, strip it off.
1836    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
1837      Ops.erase(Ops.begin());
1838      --Idx;
1839    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
1840      // If we have an smax with a constant maximum-int, it will always be
1841      // maximum-int.
1842      return Ops[0];
1843    }
1844  }
1845
1846  if (Ops.size() == 1) return Ops[0];
1847
1848  // Find the first SMax
1849  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
1850    ++Idx;
1851
1852  // Check to see if one of the operands is an SMax. If so, expand its operands
1853  // onto our operand list, and recurse to simplify.
1854  if (Idx < Ops.size()) {
1855    bool DeletedSMax = false;
1856    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
1857      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
1858      Ops.erase(Ops.begin()+Idx);
1859      DeletedSMax = true;
1860    }
1861
1862    if (DeletedSMax)
1863      return getSMaxExpr(Ops);
1864  }
1865
1866  // Okay, check to see if the same value occurs in the operand list twice.  If
1867  // so, delete one.  Since we sorted the list, these values are required to
1868  // be adjacent.
1869  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1870    if (Ops[i] == Ops[i+1]) {      //  X smax Y smax Y  -->  X smax Y
1871      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1872      --i; --e;
1873    }
1874
1875  if (Ops.size() == 1) return Ops[0];
1876
1877  assert(!Ops.empty() && "Reduced smax down to nothing!");
1878
1879  // Okay, it looks like we really DO need an smax expr.  Check to see if we
1880  // already have one, otherwise create a new one.
1881  FoldingSetNodeID ID;
1882  ID.AddInteger(scSMaxExpr);
1883  ID.AddInteger(Ops.size());
1884  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1885    ID.AddPointer(Ops[i]);
1886  void *IP = 0;
1887  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1888  SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
1889  new (S) SCEVSMaxExpr(ID, Ops);
1890  UniqueSCEVs.InsertNode(S, IP);
1891  return S;
1892}
1893
1894const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
1895                                         const SCEV *RHS) {
1896  SmallVector<const SCEV *, 2> Ops;
1897  Ops.push_back(LHS);
1898  Ops.push_back(RHS);
1899  return getUMaxExpr(Ops);
1900}
1901
1902const SCEV *
1903ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1904  assert(!Ops.empty() && "Cannot get empty umax!");
1905  if (Ops.size() == 1) return Ops[0];
1906#ifndef NDEBUG
1907  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1908    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1909           getEffectiveSCEVType(Ops[0]->getType()) &&
1910           "SCEVUMaxExpr operand types don't match!");
1911#endif
1912
1913  // Sort by complexity; this groups all similar expression types together.
1914  GroupByComplexity(Ops, LI);
1915
1916  // If there are any constants, fold them together.
1917  unsigned Idx = 0;
1918  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1919    ++Idx;
1920    assert(Idx < Ops.size());
1921    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1922      // We found two constants, fold them together!
1923      ConstantInt *Fold = ConstantInt::get(getContext(),
1924                              APIntOps::umax(LHSC->getValue()->getValue(),
1925                                             RHSC->getValue()->getValue()));
1926      Ops[0] = getConstant(Fold);
1927      Ops.erase(Ops.begin()+1);  // Erase the folded element
1928      if (Ops.size() == 1) return Ops[0];
1929      LHSC = cast<SCEVConstant>(Ops[0]);
1930    }
1931
1932    // If we are left with a constant minimum-int, strip it off.
1933    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
1934      Ops.erase(Ops.begin());
1935      --Idx;
1936    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
1937      // If we have an umax with a constant maximum-int, it will always be
1938      // maximum-int.
1939      return Ops[0];
1940    }
1941  }
1942
1943  if (Ops.size() == 1) return Ops[0];
1944
1945  // Find the first UMax
1946  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
1947    ++Idx;
1948
1949  // Check to see if one of the operands is a UMax. If so, expand its operands
1950  // onto our operand list, and recurse to simplify.
1951  if (Idx < Ops.size()) {
1952    bool DeletedUMax = false;
1953    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
1954      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
1955      Ops.erase(Ops.begin()+Idx);
1956      DeletedUMax = true;
1957    }
1958
1959    if (DeletedUMax)
1960      return getUMaxExpr(Ops);
1961  }
1962
1963  // Okay, check to see if the same value occurs in the operand list twice.  If
1964  // so, delete one.  Since we sorted the list, these values are required to
1965  // be adjacent.
1966  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1967    if (Ops[i] == Ops[i+1]) {      //  X umax Y umax Y  -->  X umax Y
1968      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1969      --i; --e;
1970    }
1971
1972  if (Ops.size() == 1) return Ops[0];
1973
1974  assert(!Ops.empty() && "Reduced umax down to nothing!");
1975
1976  // Okay, it looks like we really DO need a umax expr.  Check to see if we
1977  // already have one, otherwise create a new one.
1978  FoldingSetNodeID ID;
1979  ID.AddInteger(scUMaxExpr);
1980  ID.AddInteger(Ops.size());
1981  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1982    ID.AddPointer(Ops[i]);
1983  void *IP = 0;
1984  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1985  SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
1986  new (S) SCEVUMaxExpr(ID, Ops);
1987  UniqueSCEVs.InsertNode(S, IP);
1988  return S;
1989}
1990
1991const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
1992                                         const SCEV *RHS) {
1993  // ~smax(~x, ~y) == smin(x, y).
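  // (Since ~z == -1 - z, smax(~x, ~y) == -1 - smin(x, y); applying ~ once
  // more recovers smin(x, y).)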
1994  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1995}
1996
1997const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
1998                                         const SCEV *RHS) {
1999  // ~umax(~x, ~y) == umin(x, y)
2000  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2001}
2002
2003const SCEV *ScalarEvolution::getUnknown(Value *V) {
2004  // Don't attempt to do anything other than create a SCEVUnknown object
2005  // here.  createSCEV only calls getUnknown after checking for all other
2006  // interesting possibilities, and any other code that calls getUnknown
2007  // is doing so in order to hide a value from SCEV canonicalization.
2008
2009  FoldingSetNodeID ID;
2010  ID.AddInteger(scUnknown);
2011  ID.AddPointer(V);
2012  void *IP = 0;
2013  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2014  SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
2015  new (S) SCEVUnknown(ID, V);
2016  UniqueSCEVs.InsertNode(S, IP);
2017  return S;
2018}
2019
2020//===----------------------------------------------------------------------===//
2021//            Basic SCEV Analysis and PHI Idiom Recognition Code
2022//
2023
2024/// isSCEVable - Test if values of the given type are analyzable within
2025/// the SCEV framework. This primarily includes integer types, and it
2026/// can optionally include pointer types if the ScalarEvolution class
2027/// has access to target-specific information.
2028bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2029  // Integers are always SCEVable.
2030  if (Ty->isInteger())
2031    return true;
2032
2033  // Pointers are SCEVable if TargetData information is available
2034  // to provide pointer size information.
2035  if (isa<PointerType>(Ty))
2036    return TD != NULL;
2037
2038  // Otherwise it's not SCEVable.
2039  return false;
2040}
2041
2042/// getTypeSizeInBits - Return the size in bits of the specified type,
2043/// for which isSCEVable must return true.
2044uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2045  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2046
2047  // If we have a TargetData, use it!
2048  if (TD)
2049    return TD->getTypeSizeInBits(Ty);
2050
2051  // Otherwise, we support only integer types.
2052  assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
2053  return Ty->getPrimitiveSizeInBits();
2054}
2055
2056/// getEffectiveSCEVType - Return a type with the same bitwidth as
2057/// the given type and which represents how SCEV will treat the given
2058/// type, for which isSCEVable must return true. For pointer types,
2059/// this is the pointer-sized integer type.
2060const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2061  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2062
2063  if (Ty->isInteger())
2064    return Ty;
2065
2066  assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
2067  return TD->getIntPtrType();
2068}
2069
2070const SCEV *ScalarEvolution::getCouldNotCompute() {
2071  return &CouldNotCompute;
2072}
2073
2074/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2075/// expression and create a new one.
2076const SCEV *ScalarEvolution::getSCEV(Value *V) {
2077  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2078
2079  std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
2080  if (I != Scalars.end()) return I->second;
2081  const SCEV *S = createSCEV(V);
2082  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2083  return S;
2084}
2085
2086/// getIntegerSCEV - Given a SCEVable type, create a constant for the
2087/// specified signed integer value and return a SCEV for the constant.
2088const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
2089  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
2090  return getConstant(ConstantInt::get(ITy, Val));
2091}
2092
2093/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2094///
2095const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2096  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2097    return getConstant(
2098               cast<ConstantInt>(getContext().getConstantExprNeg(VC->getValue())));
2099
2100  const Type *Ty = V->getType();
2101  Ty = getEffectiveSCEVType(Ty);
2102  return getMulExpr(V,
2103                  getConstant(cast<ConstantInt>(getContext().getAllOnesValue(Ty))));
2104}
2105
2106/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2107const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2108  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2109    return getConstant(
2110                cast<ConstantInt>(getContext().getConstantExprNot(VC->getValue())));
2111
2112  const Type *Ty = V->getType();
2113  Ty = getEffectiveSCEVType(Ty);
2114  const SCEV *AllOnes =
2115                   getConstant(cast<ConstantInt>(getContext().getAllOnesValue(Ty)));
2116  return getMinusSCEV(AllOnes, V);
2117}
2118
2119/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2120///
2121const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2122                                          const SCEV *RHS) {
2123  // X - Y --> X + -Y
2124  return getAddExpr(LHS, getNegativeSCEV(RHS));
2125}
2126
2127/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2128/// input value to the specified type.  If the type must be extended, it is zero
2129/// extended.
2130const SCEV *
2131ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2132                                         const Type *Ty) {
2133  const Type *SrcTy = V->getType();
2134  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2135         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2136         "Cannot truncate or zero extend with non-integer arguments!");
2137  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2138    return V;  // No conversion
2139  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2140    return getTruncateExpr(V, Ty);
2141  return getZeroExtendExpr(V, Ty);
2142}
2143
2144/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2145/// input value to the specified type.  If the type must be extended, it is sign
2146/// extended.
2147const SCEV *
2148ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2149                                         const Type *Ty) {
2150  const Type *SrcTy = V->getType();
2151  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2152         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2153         "Cannot truncate or zero extend with non-integer arguments!");
2154  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2155    return V;  // No conversion
2156  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2157    return getTruncateExpr(V, Ty);
2158  return getSignExtendExpr(V, Ty);
2159}
2160
2161/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2162/// input value to the specified type.  If the type must be extended, it is zero
2163/// extended.  The conversion must not be narrowing.
2164const SCEV *
2165ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2166  const Type *SrcTy = V->getType();
2167  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2168         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2169         "Cannot noop or zero extend with non-integer arguments!");
2170  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2171         "getNoopOrZeroExtend cannot truncate!");
2172  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2173    return V;  // No conversion
2174  return getZeroExtendExpr(V, Ty);
2175}
2176
2177/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2178/// input value to the specified type.  If the type must be extended, it is sign
2179/// extended.  The conversion must not be narrowing.
2180const SCEV *
2181ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2182  const Type *SrcTy = V->getType();
2183  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2184         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2185         "Cannot noop or sign extend with non-integer arguments!");
2186  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2187         "getNoopOrSignExtend cannot truncate!");
2188  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2189    return V;  // No conversion
2190  return getSignExtendExpr(V, Ty);
2191}
2192
2193/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2194/// the input value to the specified type. If the type must be extended,
2195/// it is extended with unspecified bits. The conversion must not be
2196/// narrowing.
2197const SCEV *
2198ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2199  const Type *SrcTy = V->getType();
2200  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2201         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2202         "Cannot noop or any extend with non-integer arguments!");
2203  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2204         "getNoopOrAnyExtend cannot truncate!");
2205  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2206    return V;  // No conversion
2207  return getAnyExtendExpr(V, Ty);
2208}
2209
2210/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2211/// input value to the specified type.  The conversion must not be widening.
2212const SCEV *
2213ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2214  const Type *SrcTy = V->getType();
2215  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2216         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2217         "Cannot truncate or noop with non-integer arguments!");
2218  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2219         "getTruncateOrNoop cannot extend!");
2220  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2221    return V;  // No conversion
2222  return getTruncateExpr(V, Ty);
2223}
2224
2225/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2226/// the types using zero-extension, and then perform a umax operation
2227/// with them.
2228const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2229                                                        const SCEV *RHS) {
2230  const SCEV *PromotedLHS = LHS;
2231  const SCEV *PromotedRHS = RHS;
2232
2233  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2234    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2235  else
2236    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2237
2238  return getUMaxExpr(PromotedLHS, PromotedRHS);
2239}
2240
2241/// getUMinFromMismatchedTypes - Promote the operands to the wider of
2242/// the types using zero-extension, and then perform a umin operation
2243/// with them.
2244const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2245                                                        const SCEV *RHS) {
2246  const SCEV *PromotedLHS = LHS;
2247  const SCEV *PromotedRHS = RHS;
2248
2249  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2250    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2251  else
2252    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2253
2254  return getUMinExpr(PromotedLHS, PromotedRHS);
2255}
2256
2257/// PushDefUseChildren - Push users of the given Instruction
2258/// onto the given Worklist.
2259static void
2260PushDefUseChildren(Instruction *I,
2261                   SmallVectorImpl<Instruction *> &Worklist) {
2262  // Push the def-use children onto the Worklist stack.
2263  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2264       UI != UE; ++UI)
2265    Worklist.push_back(cast<Instruction>(UI));
2266}
2267
2268/// ForgetSymbolicName - This looks up computed SCEV values for all
2269/// instructions that depend on the given instruction and removes them from
2270/// the Scalars map if they reference SymName. This is used during PHI
2271/// resolution.
2272void
2273ScalarEvolution::ForgetSymbolicName(Instruction *I, const SCEV *SymName) {
2274  SmallVector<Instruction *, 16> Worklist;
2275  PushDefUseChildren(I, Worklist);
2276
2277  SmallPtrSet<Instruction *, 8> Visited;
2278  Visited.insert(I);
2279  while (!Worklist.empty()) {
2280    Instruction *I = Worklist.pop_back_val();
2281    if (!Visited.insert(I)) continue;
2282
2283    std::map<SCEVCallbackVH, const SCEV*>::iterator It =
2284      Scalars.find(static_cast<Value *>(I));
2285    if (It != Scalars.end()) {
2286      // Short-circuit the def-use traversal if the symbolic name
2287      // ceases to appear in expressions.
2288      if (!It->second->hasOperand(SymName))
2289        continue;
2290
2291      // SCEVUnknown for a PHI either means that it has an unrecognized
2292      // structure, or it's a PHI that's in the process of being computed
2293      // by createNodeForPHI.  In the former case, additional loop trip
2294      // count information isn't going to change anything. In the latter
2295      // case, createNodeForPHI will perform the necessary updates on its
2296      // own when it gets to that point.
2297      if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
2298        Scalars.erase(It);
2299      ValuesAtScopes.erase(I);
2300    }
2301
2302    PushDefUseChildren(I, Worklist);
2303  }
2304}
2305
2306/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2307/// a loop header, making it a potential recurrence, or it doesn't.
2308///
2309const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2310  if (PN->getNumIncomingValues() == 2)  // The loops have been canonicalized.
2311    if (const Loop *L = LI->getLoopFor(PN->getParent()))
2312      if (L->getHeader() == PN->getParent()) {
2313        // If it lives in the loop header, it has two incoming values, one
2314        // from outside the loop, and one from inside.
2315        unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2316        unsigned BackEdge     = IncomingEdge^1;
2317
2318        // While we are analyzing this PHI node, handle its value symbolically.
2319        const SCEV *SymbolicName = getUnknown(PN);
2320        assert(Scalars.find(PN) == Scalars.end() &&
2321               "PHI node already processed?");
2322        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2323
2324        // Using this symbolic name for the PHI, analyze the value coming around
2325        // the back-edge.
2326        Value *BEValueV = PN->getIncomingValue(BackEdge);
2327        const SCEV *BEValue = getSCEV(BEValueV);
2328
2329        // NOTE: If BEValue is loop invariant, we know that the PHI node just
2330        // has a special value for the first iteration of the loop.
2331
2332        // If the value coming around the backedge is an add with the symbolic
2333        // value we just inserted, then we found a simple induction variable!
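        // For example, when the increment is i = i + Stride, BEValue is
        // (SymbolicName + Stride) and the PHI becomes {Start,+,Stride}.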
2334        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2335          // If there is a single occurrence of the symbolic value, replace it
2336          // with a recurrence.
2337          unsigned FoundIndex = Add->getNumOperands();
2338          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2339            if (Add->getOperand(i) == SymbolicName)
2340              if (FoundIndex == e) {
2341                FoundIndex = i;
2342                break;
2343              }
2344
2345          if (FoundIndex != Add->getNumOperands()) {
2346            // Create an add with everything but the specified operand.
2347            SmallVector<const SCEV *, 8> Ops;
2348            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2349              if (i != FoundIndex)
2350                Ops.push_back(Add->getOperand(i));
2351            const SCEV *Accum = getAddExpr(Ops);
2352
2353            // This is not a valid addrec if the step amount is varying each
2354            // loop iteration, but is not itself an addrec in this loop.
2355            if (Accum->isLoopInvariant(L) ||
2356                (isa<SCEVAddRecExpr>(Accum) &&
2357                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2358              const SCEV *StartVal =
2359                getSCEV(PN->getIncomingValue(IncomingEdge));
2360              const SCEVAddRecExpr *PHISCEV =
2361                cast<SCEVAddRecExpr>(getAddRecExpr(StartVal, Accum, L));
2362
2363              // If the increment doesn't overflow, then neither the addrec nor the
2364              // post-increment will overflow.
2365              if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV))
2366                if (OBO->getOperand(0) == PN &&
2367                    getSCEV(OBO->getOperand(1)) ==
2368                      PHISCEV->getStepRecurrence(*this)) {
2369                  const SCEVAddRecExpr *PostInc = PHISCEV->getPostIncExpr(*this);
2370                  if (OBO->hasNoUnsignedOverflow()) {
2371                    const_cast<SCEVAddRecExpr *>(PHISCEV)
2372                      ->setHasNoUnsignedOverflow(true);
2373                    const_cast<SCEVAddRecExpr *>(PostInc)
2374                      ->setHasNoUnsignedOverflow(true);
2375                  }
2376                  if (OBO->hasNoSignedOverflow()) {
2377                    const_cast<SCEVAddRecExpr *>(PHISCEV)
2378                      ->setHasNoSignedOverflow(true);
2379                    const_cast<SCEVAddRecExpr *>(PostInc)
2380                      ->setHasNoSignedOverflow(true);
2381                  }
2382                }
2383
2384              // Okay, for the entire analysis of this edge we assumed the PHI
2385              // to be symbolic.  We now need to go back and purge all of the
2386              // entries for the scalars that use the symbolic expression.
2387              ForgetSymbolicName(PN, SymbolicName);
2388              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2389              return PHISCEV;
2390            }
2391          }
2392        } else if (const SCEVAddRecExpr *AddRec =
2393                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
2394          // Otherwise, this could be a loop like this:
2395          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2396          // In this case, j = {1,+,1}  and BEValue is j.
2397          // Because the other in-value of i (0) fits the evolution of
2398          // BEValue, i really is an addrec evolution.
2399          if (AddRec->getLoop() == L && AddRec->isAffine()) {
2400            const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2401
2402            // If StartVal = j.start - j.stride, we can use StartVal as the
2403            // start value of the addrec evolution.
2404            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2405                                            AddRec->getOperand(1))) {
2406              const SCEV *PHISCEV =
2407                 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2408
2409              // Okay, for the entire analysis of this edge we assumed the PHI
2410              // to be symbolic.  We now need to go back and purge all of the
2411              // entries for the scalars that use the symbolic expression.
2412              ForgetSymbolicName(PN, SymbolicName);
2413              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2414              return PHISCEV;
2415            }
2416          }
2417        }
2418
2419        return SymbolicName;
2420      }
2421
2422  // It's tempting to recognize PHIs with a unique incoming value; however,
2423  // this leads passes like indvars to break LCSSA form. Fortunately, such
2424  // PHIs are rare, as instcombine zaps them.
2425
2426  // If it's not a loop phi, we can't handle it yet.
2427  return getUnknown(PN);
2428}
2429
2430/// createNodeForGEP - Expand GEP instructions into add and multiply
2431/// operations. This allows them to be analyzed by regular SCEV code.
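/// For example, a GEP that indexes element %i of an array of 16-byte
/// elements and then a struct field at byte offset 8 is expanded to
/// (%base + 16 * %i + 8).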
2432///
2433const SCEV *ScalarEvolution::createNodeForGEP(Operator *GEP) {
2434
2435  const Type *IntPtrTy = TD->getIntPtrType();
2436  Value *Base = GEP->getOperand(0);
2437  // Don't attempt to analyze GEPs over unsized objects.
2438  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2439    return getUnknown(GEP);
2440  const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
2441  gep_type_iterator GTI = gep_type_begin(GEP);
2442  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2443                                      E = GEP->op_end();
2444       I != E; ++I) {
2445    Value *Index = *I;
2446    // Compute the (potentially symbolic) offset in bytes for this index.
2447    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2448      // For a struct, add the member offset.
2449      const StructLayout &SL = *TD->getStructLayout(STy);
2450      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2451      uint64_t Offset = SL.getElementOffset(FieldNo);
2452      TotalOffset = getAddExpr(TotalOffset, getIntegerSCEV(Offset, IntPtrTy));
2453    } else {
2454      // For an array, add the element offset, explicitly scaled.
2455      const SCEV *LocalOffset = getSCEV(Index);
2456      if (!isa<PointerType>(LocalOffset->getType()))
2457        // Getelementptr indices are signed.
2458        LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
2459      LocalOffset =
2460        getMulExpr(LocalOffset,
2461                   getIntegerSCEV(TD->getTypeAllocSize(*GTI), IntPtrTy));
2462      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2463    }
2464  }
2465  return getAddExpr(getSCEV(Base), TotalOffset);
2466}
2467
2468/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2469/// guaranteed to end in (at every loop iteration).  It is, at the same time,
2470/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2471/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
2472uint32_t
2473ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2474  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2475    return C->getValue()->getValue().countTrailingZeros();
2476
2477  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2478    return std::min(GetMinTrailingZeros(T->getOperand()),
2479                    (uint32_t)getTypeSizeInBits(T->getType()));
2480
2481  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2482    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2483    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2484             getTypeSizeInBits(E->getType()) : OpRes;
2485  }
2486
2487  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2488    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2489    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2490             getTypeSizeInBits(E->getType()) : OpRes;
2491  }
2492
2493  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2494    // The result is the min of all operands' results.
2495    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2496    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2497      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2498    return MinOpRes;
2499  }
2500
2501  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2502    // The result is the sum of all operands' results.
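    // For example, 4 * 6 == 24 == 0b11000, which has 2 + 1 == 3 trailing
    // zero bits.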
2503    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2504    uint32_t BitWidth = getTypeSizeInBits(M->getType());
2505    for (unsigned i = 1, e = M->getNumOperands();
2506         SumOpRes != BitWidth && i != e; ++i)
2507      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2508                          BitWidth);
2509    return SumOpRes;
2510  }
2511
2512  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2513    // The result is the min of all operands' results.
2514    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2515    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2516      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2517    return MinOpRes;
2518  }
2519
2520  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2521    // The result is the min of all operands' results.
2522    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2523    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2524      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2525    return MinOpRes;
2526  }
2527
2528  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2529    // The result is the min of all operands' results.
2530    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2531    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2532      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2533    return MinOpRes;
2534  }
2535
2536  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2537    // For a SCEVUnknown, ask ValueTracking.
2538    unsigned BitWidth = getTypeSizeInBits(U->getType());
2539    APInt Mask = APInt::getAllOnesValue(BitWidth);
2540    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2541    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
2542    return Zeros.countTrailingOnes();
2543  }
2544
2545  // SCEVUDivExpr
2546  return 0;
2547}
2548
2549/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
2550///
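/// E.g. (an illustrative case): if the loop's backedge-taken count is
/// known to be 9, the unsigned range of {0,+,1} is [0,10).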
2551ConstantRange
2552ScalarEvolution::getUnsignedRange(const SCEV *S) {
2553
2554  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2555    return ConstantRange(C->getValue()->getValue());
2556
2557  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2558    ConstantRange X = getUnsignedRange(Add->getOperand(0));
2559    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2560      X = X.add(getUnsignedRange(Add->getOperand(i)));
2561    return X;
2562  }
2563
2564  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2565    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
2566    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2567      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
2568    return X;
2569  }
2570
2571  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2572    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
2573    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2574      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
2575    return X;
2576  }
2577
2578  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2579    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
2580    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2581      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
2582    return X;
2583  }
2584
2585  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2586    ConstantRange X = getUnsignedRange(UDiv->getLHS());
2587    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
2588    return X.udiv(Y);
2589  }
2590
2591  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2592    ConstantRange X = getUnsignedRange(ZExt->getOperand());
2593    return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
2594  }
2595
2596  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2597    ConstantRange X = getUnsignedRange(SExt->getOperand());
2598    return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
2599  }
2600
2601  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2602    ConstantRange X = getUnsignedRange(Trunc->getOperand());
2603    return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
2604  }
2605
2606  ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
2607
2608  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2609    const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
2610    const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
2611    if (!Trip) return FullSet;
2612
2613    // TODO: non-affine addrec
2614    if (AddRec->isAffine()) {
2615      const Type *Ty = AddRec->getType();
2616      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2617      if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
2618        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2619
2620        const SCEV *Start = AddRec->getStart();
2621        const SCEV *Step = AddRec->getStepRecurrence(*this);
2622        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2623
2624        // Check for overflow.
2625        // TODO: This is very conservative.
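        // The two accepted cases are unit strides where the recurrence
        // provably marches from Start to End without wrapping, so every
        // iterate lies between the two endpoints' ranges.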
2626        if (!(Step->isOne() &&
2627              isKnownPredicate(ICmpInst::ICMP_ULT, Start, End)) &&
2628            !(Step->isAllOnesValue() &&
2629              isKnownPredicate(ICmpInst::ICMP_UGT, Start, End)))
2630          return FullSet;
2631
2632        ConstantRange StartRange = getUnsignedRange(Start);
2633        ConstantRange EndRange = getUnsignedRange(End);
2634        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
2635                                   EndRange.getUnsignedMin());
2636        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
2637                                   EndRange.getUnsignedMax());
2638        if (Min.isMinValue() && Max.isMaxValue())
2639          return FullSet;
2640        return ConstantRange(Min, Max+1);
2641      }
2642    }
2643  }
2644
2645  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2646    // For a SCEVUnknown, ask ValueTracking.
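    // The known bits bound the value: the minimum sets exactly the
    // known-one bits and the maximum clears exactly the known-zero bits,
    // giving the range [Ones, ~Zeros+1).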
2647    unsigned BitWidth = getTypeSizeInBits(U->getType());
2648    APInt Mask = APInt::getAllOnesValue(BitWidth);
2649    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2650    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2651    if (Ones == ~Zeros + 1)
2652      return FullSet;
2653    return ConstantRange(Ones, ~Zeros + 1);
2654  }
2655
2656  return FullSet;
2657}
2658
2659/// getSignedRange - Determine the signed range for a particular SCEV.
2660///
2661ConstantRange
2662ScalarEvolution::getSignedRange(const SCEV *S) {
2663
2664  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2665    return ConstantRange(C->getValue()->getValue());
2666
2667  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2668    ConstantRange X = getSignedRange(Add->getOperand(0));
2669    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2670      X = X.add(getSignedRange(Add->getOperand(i)));
2671    return X;
2672  }
2673
2674  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2675    ConstantRange X = getSignedRange(Mul->getOperand(0));
2676    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2677      X = X.multiply(getSignedRange(Mul->getOperand(i)));
2678    return X;
2679  }
2680
2681  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2682    ConstantRange X = getSignedRange(SMax->getOperand(0));
2683    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2684      X = X.smax(getSignedRange(SMax->getOperand(i)));
2685    return X;
2686  }
2687
2688  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2689    ConstantRange X = getSignedRange(UMax->getOperand(0));
2690    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2691      X = X.umax(getSignedRange(UMax->getOperand(i)));
2692    return X;
2693  }
2694
2695  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2696    ConstantRange X = getSignedRange(UDiv->getLHS());
2697    ConstantRange Y = getSignedRange(UDiv->getRHS());
2698    return X.udiv(Y);
2699  }
2700
2701  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2702    ConstantRange X = getSignedRange(ZExt->getOperand());
2703    return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
2704  }
2705
2706  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2707    ConstantRange X = getSignedRange(SExt->getOperand());
2708    return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
2709  }
2710
2711  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2712    ConstantRange X = getSignedRange(Trunc->getOperand());
2713    return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
2714  }
2715
2716  ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
2717
2718  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2719    const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
2720    const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
2721    if (!Trip) return FullSet;
2722
2723    // TODO: non-affine addrec
2724    if (AddRec->isAffine()) {
2725      const Type *Ty = AddRec->getType();
2726      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2727      if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
2728        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2729
2730        const SCEV *Start = AddRec->getStart();
2731        const SCEV *Step = AddRec->getStepRecurrence(*this);
2732        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2733
2734        // Check for overflow.
2735        // TODO: This is very conservative.
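        // As in getUnsignedRange, accept only unit strides that provably
        // move from Start to End without signed wrapping.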
2736        if (!(Step->isOne() &&
2737              isKnownPredicate(ICmpInst::ICMP_SLT, Start, End)) &&
2738            !(Step->isAllOnesValue() &&
2739              isKnownPredicate(ICmpInst::ICMP_SGT, Start, End)))
2740          return FullSet;
2741
2742        ConstantRange StartRange = getSignedRange(Start);
2743        ConstantRange EndRange = getSignedRange(End);
2744        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
2745                                   EndRange.getSignedMin());
2746        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
2747                                   EndRange.getSignedMax());
2748        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
2749          return FullSet;
2750        return ConstantRange(Min, Max+1);
2751      }
2752    }
2753  }
2754
2755  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2756    // For a SCEVUnknown, ask ValueTracking.
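    // NS known sign bits confine the value to
    // [SignedMin ashr (NS-1), (SignedMax ashr (NS-1)) + 1); a single sign
    // bit (NS == 1) tells us nothing.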
2757    unsigned BitWidth = getTypeSizeInBits(U->getType());
2758    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
2759    if (NS == 1)
2760      return FullSet;
2761    return
2762      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
2763                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1);
2764  }
2765
2766  return FullSet;
2767}
2768
2769/// createSCEV - We know that there is no SCEV for the specified value.
2770/// Analyze the expression.
2771///
2772const SCEV *ScalarEvolution::createSCEV(Value *V) {
2773  if (!isSCEVable(V->getType()))
2774    return getUnknown(V);
2775
2776  unsigned Opcode = Instruction::UserOp1;
2777  if (Instruction *I = dyn_cast<Instruction>(V))
2778    Opcode = I->getOpcode();
2779  else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
2780    Opcode = CE->getOpcode();
2781  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
2782    return getConstant(CI);
2783  else if (isa<ConstantPointerNull>(V))
2784    return getIntegerSCEV(0, V->getType());
2785  else if (isa<UndefValue>(V))
2786    return getIntegerSCEV(0, V->getType());
2787  else
2788    return getUnknown(V);
2789
2790  Operator *U = cast<Operator>(V);
2791  switch (Opcode) {
2792  case Instruction::Add:
2793    return getAddExpr(getSCEV(U->getOperand(0)),
2794                      getSCEV(U->getOperand(1)));
2795  case Instruction::Mul:
2796    return getMulExpr(getSCEV(U->getOperand(0)),
2797                      getSCEV(U->getOperand(1)));
2798  case Instruction::UDiv:
2799    return getUDivExpr(getSCEV(U->getOperand(0)),
2800                       getSCEV(U->getOperand(1)));
2801  case Instruction::Sub:
2802    return getMinusSCEV(getSCEV(U->getOperand(0)),
2803                        getSCEV(U->getOperand(1)));
2804  case Instruction::And:
2805    // For an expression like x&255 that merely masks off the high bits,
2806    // use zext(trunc(x)) as the SCEV expression.
2807    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2808      if (CI->isNullValue())
2809        return getSCEV(U->getOperand(1));
2810      if (CI->isAllOnesValue())
2811        return getSCEV(U->getOperand(0));
2812      const APInt &A = CI->getValue();
2813
2814      // Instcombine's ShrinkDemandedConstant may strip bits out of
2815      // constants, obscuring what would otherwise be a low-bits mask.
2816      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
2817      // knew about to reconstruct a low-bits mask value.
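      // E.g. (illustrative): if x's low bit is known zero, instcombine
      // may rewrite x & 255 as x & 254; the known bits let us still model
      // it as zext(trunc(x to i8)) to the original type.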
2818      unsigned LZ = A.countLeadingZeros();
2819      unsigned BitWidth = A.getBitWidth();
2820      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
2821      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2822      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
2823
2824      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
2825
2826      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
2827        return
2828          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
2829                                            IntegerType::get(BitWidth - LZ)),
2830                            U->getType());
2831    }
2832    break;
2833
2834  case Instruction::Or:
2835    // If the RHS of the Or is a constant, we may have something like:
2836    // X*4+1, which got turned into X*4|1.  Handle this as an Add so loop
2837    // optimizations will transparently handle this case.
2838    //
2839    // In order for this transformation to be safe, the LHS must be of the
2840    // form X*(2^n) and the Or constant must be less than 2^n.
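    // E.g. (illustrative): X*8|5 can be treated as X*8+5, since X*8 has
    // at least three trailing zero bits and 5 < 8, so no set bits collide.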
2841    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2842      const SCEV *LHS = getSCEV(U->getOperand(0));
2843      const APInt &CIVal = CI->getValue();
2844      if (GetMinTrailingZeros(LHS) >=
2845          (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
2846        return getAddExpr(LHS, getSCEV(U->getOperand(1)));
2847    }
2848    break;
2849  case Instruction::Xor:
2850    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2851      // If the RHS of the xor is a signbit, then this is just an add.
2852      // Instcombine turns add of signbit into xor as a strength reduction step.
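      // E.g. on i32, x ^ 0x80000000 equals x + 0x80000000: the addend's
      // only set bit is the sign bit, whose carry out is discarded.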
2853      if (CI->getValue().isSignBit())
2854        return getAddExpr(getSCEV(U->getOperand(0)),
2855                          getSCEV(U->getOperand(1)));
2856
2857      // If the RHS of xor is -1, then this is a not operation.
2858      if (CI->isAllOnesValue())
2859        return getNotSCEV(getSCEV(U->getOperand(0)));
2860
2861      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
2862      // This is a variant of the check for xor with -1, and it handles
2863      // the case where instcombine has trimmed non-demanded bits out
2864      // of an xor with -1.
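      // E.g. (illustrative): with C == 255, xor(and(x, 255), 255)
      // computes the low 8 bits of ~x, modeled here as
      // zext((trunc x to i8) xor -1).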
2865      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
2866        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
2867          if (BO->getOpcode() == Instruction::And &&
2868              LCI->getValue() == CI->getValue())
2869            if (const SCEVZeroExtendExpr *Z =
2870                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
2871              const Type *UTy = U->getType();
2872              const SCEV *Z0 = Z->getOperand();
2873              const Type *Z0Ty = Z0->getType();
2874              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
2875
2876              // If C is a low-bits mask, the zero extend is serving to
2877              // mask off the high bits. Complement the operand and
2878              // re-apply the zext.
2879              if (APIntOps::isMask(Z0TySize, CI->getValue()))
2880                return getZeroExtendExpr(getNotSCEV(Z0), UTy);
2881
2882              // If C is a single bit, it may be in the sign-bit position
2883              // before the zero-extend. In this case, represent the xor
2884              // using an add, which is equivalent, and re-apply the zext.
2885              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
2886              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
2887                  Trunc.isSignBit())
2888                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
2889                                         UTy);
2890            }
2891    }
2892    break;
2893
2894  case Instruction::Shl:
2895    // Turn a shift left by a constant amount into a multiply.
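    // E.g. x << 3 becomes x * 8.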
2896    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2897      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2898      Constant *X = ConstantInt::get(getContext(),
2899        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2900      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2901    }
2902    break;
2903
2904  case Instruction::LShr:
2905    // Turn a logical shift right by a constant amount into an unsigned divide.
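    // E.g. x >>u 3 becomes x /u 8.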
2906    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2907      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2908      Constant *X = ConstantInt::get(getContext(),
2909        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2910      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2911    }
2912    break;
2913
2914  case Instruction::AShr:
2915    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
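    // E.g. on i32, (x << 24) >>s 24 becomes sext(trunc(x to i8) to i32).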
2916    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2917      if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2918        if (L->getOpcode() == Instruction::Shl &&
2919            L->getOperand(1) == U->getOperand(1)) {
2920          unsigned BitWidth = getTypeSizeInBits(U->getType());
2921          uint64_t Amt = BitWidth - CI->getZExtValue();
2922          if (Amt == BitWidth)
2923            return getSCEV(L->getOperand(0));       // shift by zero --> noop
2924          if (Amt > BitWidth)
2925            return getIntegerSCEV(0, U->getType()); // value is undefined
2926          return
2927            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2928                                                      IntegerType::get(Amt)),
2929                                 U->getType());
2930        }
2931    break;
2932
2933  case Instruction::Trunc:
2934    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2935
2936  case Instruction::ZExt:
2937    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2938
2939  case Instruction::SExt:
2940    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2941
2942  case Instruction::BitCast:
2943    // BitCasts are no-op casts so we just eliminate the cast.
2944    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
2945      return getSCEV(U->getOperand(0));
2946    break;
2947
2948    // It's tempting to handle inttoptr and ptrtoint, however this can
2949    // lead to pointer expressions which cannot be expanded to GEPs
2950    // (because they may overflow). For now, the only pointer-typed
2951    // expressions we handle are GEPs and address literals.
2952
2953  case Instruction::GetElementPtr:
2954    if (!TD) break; // Without TD we can't analyze pointers.
2955    return createNodeForGEP(U);
2956
2957  case Instruction::PHI:
2958    return createNodeForPHI(cast<PHINode>(U));
2959
2960  case Instruction::Select:
2961    // This could be a smax or umax that was lowered earlier.
2962    // Try to recover it.
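    // E.g. (x sgt y) ? x : y is smax(x, y), and (x ult y) ? x : y is
    // umin(x, y).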
2963    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
2964      Value *LHS = ICI->getOperand(0);
2965      Value *RHS = ICI->getOperand(1);
2966      switch (ICI->getPredicate()) {
2967      case ICmpInst::ICMP_SLT:
2968      case ICmpInst::ICMP_SLE:
2969        std::swap(LHS, RHS);
2970        // fall through
2971      case ICmpInst::ICMP_SGT:
2972      case ICmpInst::ICMP_SGE:
2973        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2974          return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
2975        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2976          return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
2977        break;
2978      case ICmpInst::ICMP_ULT:
2979      case ICmpInst::ICMP_ULE:
2980        std::swap(LHS, RHS);
2981        // fall through
2982      case ICmpInst::ICMP_UGT:
2983      case ICmpInst::ICMP_UGE:
2984        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2985          return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
2986        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2987          return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
2988        break;
2989      case ICmpInst::ICMP_NE:
2990        // n != 0 ? n : 1  ->  umax(n, 1)
2991        if (LHS == U->getOperand(1) &&
2992            isa<ConstantInt>(U->getOperand(2)) &&
2993            cast<ConstantInt>(U->getOperand(2))->isOne() &&
2994            isa<ConstantInt>(RHS) &&
2995            cast<ConstantInt>(RHS)->isZero())
2996          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
2997        break;
2998      case ICmpInst::ICMP_EQ:
2999        // n == 0 ? 1 : n  ->  umax(n, 1)
3000        if (LHS == U->getOperand(2) &&
3001            isa<ConstantInt>(U->getOperand(1)) &&
3002            cast<ConstantInt>(U->getOperand(1))->isOne() &&
3003            isa<ConstantInt>(RHS) &&
3004            cast<ConstantInt>(RHS)->isZero())
3005          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
3006        break;
3007      default:
3008        break;
3009      }
3010    }
3011
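    // fall through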
3012  default: // We cannot analyze this expression.
3013    break;
3014  }
3015
3016  return getUnknown(V);
3017}
3018
3019
3020
3021//===----------------------------------------------------------------------===//
3022//                   Iteration Count Computation Code
3023//
3024
3025/// getBackedgeTakenCount - If the specified loop has a predictable
3026/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3027/// object. The backedge-taken count is the number of times the loop header
3028/// will be branched to from within the loop. This is one less than the
3029/// trip count of the loop, since it doesn't count the first iteration,
3030/// when the header is branched to from outside the loop.
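/// For example (an illustrative case), a loop running i = 0 .. n-1 with
/// n > 0 takes the backedge n-1 times, while its trip count is n.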
3031///
3032/// Note that it is not valid to call this method on a loop without a
3033/// loop-invariant backedge-taken count (see
3034/// hasLoopInvariantBackedgeTakenCount).
3035///
3036const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3037  return getBackedgeTakenInfo(L).Exact;
3038}
3039
3040/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3041/// return the least SCEV value that is known never to be less than the
3042/// actual backedge taken count.
3043const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3044  return getBackedgeTakenInfo(L).Max;
3045}
3046
3047/// PushLoopPHIs - Push PHI nodes in the header of the given loop
3048/// onto the given Worklist.
3049static void
3050PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3051  BasicBlock *Header = L->getHeader();
3052
3053  // Push all Loop-header PHIs onto the Worklist stack.
3054  for (BasicBlock::iterator I = Header->begin();
3055       PHINode *PN = dyn_cast<PHINode>(I); ++I)
3056    Worklist.push_back(PN);
3057}
3058
3059const ScalarEvolution::BackedgeTakenInfo &
3060ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3061  // Initially insert a CouldNotCompute for this loop. If the insertion
3062  // succeeds, proceed to actually compute a backedge-taken count and
3063  // update the value. The temporary CouldNotCompute value tells SCEV
3064  // code elsewhere that it shouldn't attempt to request a new
3065  // backedge-taken count, which could result in infinite recursion.
3066  std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
3067    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3068  if (Pair.second) {
3069    BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
3070    if (ItCount.Exact != getCouldNotCompute()) {
3071      assert(ItCount.Exact->isLoopInvariant(L) &&
3072             ItCount.Max->isLoopInvariant(L) &&
3073             "Computed trip count isn't loop invariant for loop!");
3074      ++NumTripCountsComputed;
3075
3076      // Update the value in the map.
3077      Pair.first->second = ItCount;
3078    } else {
3079      if (ItCount.Max != getCouldNotCompute())
3080        // Update the value in the map.
3081        Pair.first->second = ItCount;
3082      if (isa<PHINode>(L->getHeader()->begin()))
3083        // Only count loops that have phi nodes as not being computable.
3084        ++NumTripCountsNotComputed;
3085    }
3086
3087    // Now that we know more about the trip count for this loop, forget any
3088    // existing SCEV values for PHI nodes in this loop since they are only
3089    // conservative estimates made without the benefit of trip count
3090    // information. This is similar to the code in
3091    // forgetLoopBackedgeTakenCount, except that it handles SCEVUnknown PHI
3092    // nodes specially.
3093    if (ItCount.hasAnyInfo()) {
3094      SmallVector<Instruction *, 16> Worklist;
3095      PushLoopPHIs(L, Worklist);
3096
3097      SmallPtrSet<Instruction *, 8> Visited;
3098      while (!Worklist.empty()) {
3099        Instruction *I = Worklist.pop_back_val();
3100        if (!Visited.insert(I)) continue;
3101
3102        std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3103          Scalars.find(static_cast<Value *>(I));
3104        if (It != Scalars.end()) {
3105          // SCEVUnknown for a PHI either means that it has an unrecognized
3106          // structure, or it's a PHI that's in the process of being computed
3107          // by createNodeForPHI.  In the former case, additional loop trip
3108          // count information isn't going to change anything. In the latter
3109          // case, createNodeForPHI will perform the necessary updates on its
3110          // own when it gets to that point.
3111          if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
3112            Scalars.erase(It);
3113          ValuesAtScopes.erase(I);
3114          if (PHINode *PN = dyn_cast<PHINode>(I))
3115            ConstantEvolutionLoopExitValue.erase(PN);
3116        }
3117
3118        PushDefUseChildren(I, Worklist);
3119      }
3120    }
3121  }
3122  return Pair.first->second;
3123}
3124
3125/// forgetLoopBackedgeTakenCount - This method should be called by the
3126/// client when it has changed a loop in a way that may affect
3127/// ScalarEvolution's ability to compute a trip count, or if the loop
3128/// is deleted.
3129void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
3130  BackedgeTakenCounts.erase(L);
3131
3132  SmallVector<Instruction *, 16> Worklist;
3133  PushLoopPHIs(L, Worklist);
3134
3135  SmallPtrSet<Instruction *, 8> Visited;
3136  while (!Worklist.empty()) {
3137    Instruction *I = Worklist.pop_back_val();
3138    if (!Visited.insert(I)) continue;
3139
3140    std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3141      Scalars.find(static_cast<Value *>(I));
3142    if (It != Scalars.end()) {
3143      Scalars.erase(It);
3144      ValuesAtScopes.erase(I);
3145      if (PHINode *PN = dyn_cast<PHINode>(I))
3146        ConstantEvolutionLoopExitValue.erase(PN);
3147    }
3148
3149    PushDefUseChildren(I, Worklist);
3150  }
3151}
3152
3153/// ComputeBackedgeTakenCount - Compute the number of times the backedge
3154/// of the specified loop will execute.
3155ScalarEvolution::BackedgeTakenInfo
3156ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3157  SmallVector<BasicBlock*, 8> ExitingBlocks;
3158  L->getExitingBlocks(ExitingBlocks);
3159
3160  // Examine all exits and pick the most conservative values.
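  // The loop exits the first time any single exit is taken, so the exact
  // backedge-taken count is the umin over the per-exit counts.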
3161  const SCEV *BECount = getCouldNotCompute();
3162  const SCEV *MaxBECount = getCouldNotCompute();
3163  bool CouldNotComputeBECount = false;
3164  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3165    BackedgeTakenInfo NewBTI =
3166      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3167
3168    if (NewBTI.Exact == getCouldNotCompute()) {
3169      // We couldn't compute an exact value for this exit, so
3170      // we won't be able to compute an exact value for the loop.
3171      CouldNotComputeBECount = true;
3172      BECount = getCouldNotCompute();
3173    } else if (!CouldNotComputeBECount) {
3174      if (BECount == getCouldNotCompute())
3175        BECount = NewBTI.Exact;
3176      else
3177        BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3178    }
3179    if (MaxBECount == getCouldNotCompute())
3180      MaxBECount = NewBTI.Max;
3181    else if (NewBTI.Max != getCouldNotCompute())
3182      MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3183  }
3184
3185  return BackedgeTakenInfo(BECount, MaxBECount);
3186}
3187
3188/// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3189/// of the specified loop will execute if it exits via the specified block.
3190ScalarEvolution::BackedgeTakenInfo
3191ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3192                                                   BasicBlock *ExitingBlock) {
3193
3194  // Okay, we've chosen an exiting block.  See what condition causes us to
3195  // exit at this block.
3196  //
3197  // FIXME: we should be able to handle switch instructions (with a single exit)
3198  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3199  if (ExitBr == 0) return getCouldNotCompute();
3200  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3201
3202  // At this point, we know we have a conditional branch that determines whether
3203  // the loop is exited.  However, we don't know if the branch is executed each
3204  // time through the loop.  If not, then the execution count of the branch will
3205  // not be equal to the trip count of the loop.
3206  //
3207  // Currently we check for this by checking to see if the Exit branch goes to
3208  // the loop header.  If so, we know it will always execute the same number of
3209  // times as the loop.  We also handle the case where the exit block *is* the
3210  // loop header.  This is common for un-rotated loops.
3211  //
3212  // If both of those tests fail, walk up the unique predecessor chain to the
3213  // header, stopping if there is an edge that doesn't exit the loop. If the
3214  // header is reached, the execution count of the branch will be equal to the
3215  // trip count of the loop.
3216  //
3217  //  More extensive analysis could be done to handle more cases here.
3218  //
3219  if (ExitBr->getSuccessor(0) != L->getHeader() &&
3220      ExitBr->getSuccessor(1) != L->getHeader() &&
3221      ExitBr->getParent() != L->getHeader()) {
3222    // The simple checks failed, try climbing the unique predecessor chain
3223    // up to the header.
3224    bool Ok = false;
3225    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3226      BasicBlock *Pred = BB->getUniquePredecessor();
3227      if (!Pred)
3228        return getCouldNotCompute();
3229      TerminatorInst *PredTerm = Pred->getTerminator();
3230      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3231        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3232        if (PredSucc == BB)
3233          continue;
3234        // If the predecessor has a successor that isn't BB and isn't
3235        // outside the loop, assume the worst.
3236        if (L->contains(PredSucc))
3237          return getCouldNotCompute();
3238      }
3239      if (Pred == L->getHeader()) {
3240        Ok = true;
3241        break;
3242      }
3243      BB = Pred;
3244    }
3245    if (!Ok)
3246      return getCouldNotCompute();
3247  }
3248
3249  // Proceed to the next level to examine the exit condition expression.
3250  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3251                                               ExitBr->getSuccessor(0),
3252                                               ExitBr->getSuccessor(1));
3253}
3254
3255/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3256/// backedge of the specified loop will execute if its exit condition
3257/// were a conditional branch of ExitCond, TBB, and FBB.
3258ScalarEvolution::BackedgeTakenInfo
3259ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3260                                                       Value *ExitCond,
3261                                                       BasicBlock *TBB,
3262                                                       BasicBlock *FBB) {
3263  // Check if the controlling expression for this loop is an And or Or.
3264  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3265    if (BO->getOpcode() == Instruction::And) {
3266      // Recurse on the operands of the and.
3267      BackedgeTakenInfo BTI0 =
3268        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3269      BackedgeTakenInfo BTI1 =
3270        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3271      const SCEV *BECount = getCouldNotCompute();
3272      const SCEV *MaxBECount = getCouldNotCompute();
3273      if (L->contains(TBB)) {
3274        // Both conditions must be true for the loop to continue executing.
3275        // Choose the less conservative count.
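        // E.g. "while (c0 && c1)" leaves the loop as soon as either
        // condition fails, so the exact count is the umin of the two
        // per-condition counts.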
3276        if (BTI0.Exact == getCouldNotCompute() ||
3277            BTI1.Exact == getCouldNotCompute())
3278          BECount = getCouldNotCompute();
3279        else
3280          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3281        if (BTI0.Max == getCouldNotCompute())
3282          MaxBECount = BTI1.Max;
3283        else if (BTI1.Max == getCouldNotCompute())
3284          MaxBECount = BTI0.Max;
3285        else
3286          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3287      } else {
3288        // Both conditions must be true for the loop to exit.
3289        assert(L->contains(FBB) && "Loop block has no successor in loop!");
3290        if (BTI0.Exact != getCouldNotCompute() &&
3291            BTI1.Exact != getCouldNotCompute())
3292          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3293        if (BTI0.Max != getCouldNotCompute() &&
3294            BTI1.Max != getCouldNotCompute())
3295          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3296      }
3297
3298      return BackedgeTakenInfo(BECount, MaxBECount);
3299    }
3300    if (BO->getOpcode() == Instruction::Or) {
3301      // Recurse on the operands of the or.
3302      BackedgeTakenInfo BTI0 =
3303        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3304      BackedgeTakenInfo BTI1 =
3305        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3306      const SCEV *BECount = getCouldNotCompute();
3307      const SCEV *MaxBECount = getCouldNotCompute();
3308      if (L->contains(FBB)) {
3309        // Both conditions must be false for the loop to continue executing.
3310        // Choose the less conservative count.
3311        if (BTI0.Exact == getCouldNotCompute() ||
3312            BTI1.Exact == getCouldNotCompute())
3313          BECount = getCouldNotCompute();
3314        else
3315          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3316        if (BTI0.Max == getCouldNotCompute())
3317          MaxBECount = BTI1.Max;
3318        else if (BTI1.Max == getCouldNotCompute())
3319          MaxBECount = BTI0.Max;
3320        else
3321          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3322      } else {
3323        // Both conditions must be false for the loop to exit.
3324        assert(L->contains(TBB) && "Loop block has no successor in loop!");
3325        if (BTI0.Exact != getCouldNotCompute() &&
3326            BTI1.Exact != getCouldNotCompute())
3327          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3328        if (BTI0.Max != getCouldNotCompute() &&
3329            BTI1.Max != getCouldNotCompute())
3330          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3331      }
3332
3333      return BackedgeTakenInfo(BECount, MaxBECount);
3334    }
3335  }
3336
3337  // With an icmp, it may be feasible to compute an exact backedge-taken count.
3338  // Proceed to the next level to examine the icmp.
3339  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3340    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3341
3342  // If it's not an integer or pointer comparison then compute it the hard way.
3343  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3344}
3345
3346/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3347/// backedge of the specified loop will execute if its exit condition
3348/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
3349ScalarEvolution::BackedgeTakenInfo
3350ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3351                                                           ICmpInst *ExitCond,
3352                                                           BasicBlock *TBB,
3353                                                           BasicBlock *FBB) {
3354
3355  // If the condition was exit on true, convert the condition to exit on false
3356  ICmpInst::Predicate Cond;
3357  if (!L->contains(FBB))
3358    Cond = ExitCond->getPredicate();
3359  else
3360    Cond = ExitCond->getInversePredicate();
3361
3362  // Handle common loops like: for (X = "string"; *X; ++X)
3363  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3364    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3365      const SCEV *ItCnt =
3366        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3367      if (!isa<SCEVCouldNotCompute>(ItCnt)) {
3368        unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
3369        return BackedgeTakenInfo(ItCnt,
3370                                 isa<SCEVConstant>(ItCnt) ? ItCnt :
3371                                   getConstant(APInt::getMaxValue(BitWidth)-1));
3372      }
3373    }
3374
3375  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3376  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3377
3378  // Try to evaluate any dependencies out of the loop.
3379  LHS = getSCEVAtScope(LHS, L);
3380  RHS = getSCEVAtScope(RHS, L);
3381
3382  // At this point, we would like to compute how many iterations of the
3383  // loop the predicate will return true for these inputs.
3384  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3385    // If there is a loop-invariant operand, force it into the RHS.
3386    std::swap(LHS, RHS);
3387    Cond = ICmpInst::getSwappedPredicate(Cond);
3388  }
3389
3390  // If we have a comparison of a chrec against a constant, try to use value
3391  // ranges to answer this query.
3392  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3393    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3394      if (AddRec->getLoop() == L) {
3395        // Form the constant range.
3396        ConstantRange CompRange(
3397            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3398
3399        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3400        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3401      }
3402
3403  switch (Cond) {
3404  case ICmpInst::ICMP_NE: {                     // while (X != Y)
3405    // Convert to: while (X-Y != 0)
3406    const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3407    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3408    break;
3409  }
3410  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
3411    // Convert to: while (X-Y == 0)
3412    const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3413    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3414    break;
3415  }
3416  case ICmpInst::ICMP_SLT: {
3417    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3418    if (BTI.hasAnyInfo()) return BTI;
3419    break;
3420  }
3421  case ICmpInst::ICMP_SGT: {
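    // x >s y iff ~x <s ~y (bitwise complement reverses the ordering,
    // since ~x == -x-1), so reuse the less-than logic on the complemented
    // operands; the unsigned case below uses the same identity.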
3422    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3423                                             getNotSCEV(RHS), L, true);
3424    if (BTI.hasAnyInfo()) return BTI;
3425    break;
3426  }
3427  case ICmpInst::ICMP_ULT: {
3428    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3429    if (BTI.hasAnyInfo()) return BTI;
3430    break;
3431  }
3432  case ICmpInst::ICMP_UGT: {
3433    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3434                                             getNotSCEV(RHS), L, false);
3435    if (BTI.hasAnyInfo()) return BTI;
3436    break;
3437  }
3438  default:
3439#if 0
3440    errs() << "ComputeBackedgeTakenCount ";
3441    if (ExitCond->getOperand(0)->getType()->isUnsigned())
3442      errs() << "[unsigned] ";
3443    errs() << *LHS << "   "
3444         << Instruction::getOpcodeName(Instruction::ICmp)
3445         << "   " << *RHS << "\n";
3446#endif
3447    break;
3448  }
3449  return
3450    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3451}
3452
3453static ConstantInt *
3454EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3455                                ScalarEvolution &SE) {
3456  const SCEV *InVal = SE.getConstant(C);
3457  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
3458  assert(isa<SCEVConstant>(Val) &&
3459         "Evaluation of SCEV at constant didn't fold correctly?");
3460  return cast<SCEVConstant>(Val)->getValue();
3461}
3462
3463/// GetAddressedElementFromGlobal - Given a global variable with an initializer
3464/// and a GEP expression (missing the pointer index) indexing into it, return
3465/// the addressed element of the initializer or null if the index expression is
3466/// invalid.
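/// E.g. (illustrative): for a constant global of type [10 x i32] and the
/// single index 3, this returns element 3 of the initializer.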
3467static Constant *
3468GetAddressedElementFromGlobal(LLVMContext &Context, GlobalVariable *GV,
3469                              const std::vector<ConstantInt*> &Indices) {
3470  Constant *Init = GV->getInitializer();
3471  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3472    uint64_t Idx = Indices[i]->getZExtValue();
3473    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3474      assert(Idx < CS->getNumOperands() && "Bad struct index!");
3475      Init = cast<Constant>(CS->getOperand(Idx));
3476    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3477      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
3478      Init = cast<Constant>(CA->getOperand(Idx));
3479    } else if (isa<ConstantAggregateZero>(Init)) {
3480      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3481        assert(Idx < STy->getNumElements() && "Bad struct index!");
3482        Init = Context.getNullValue(STy->getElementType(Idx));
3483      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3484        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
3485        Init = Context.getNullValue(ATy->getElementType());
3486      } else {
3487        llvm_unreachable("Unknown constant aggregate type!");
3488      }
3489      return 0;
3490    } else {
3491      return 0; // Unknown initializer type
3492    }
3493  }
3494  return Init;
3495}
3496
3497/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3498/// 'icmp op load X, cst', try to compute the backedge-taken count.
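/// E.g. (illustrative): in "for (i = 0; table[i] != 0; ++i)" where table
/// is a constant global array, each load folds to a constant, so
/// successive iterations can be tested until the compare settles.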
3500const SCEV *
3501ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3502                                                LoadInst *LI,
3503                                                Constant *RHS,
3504                                                const Loop *L,
3505                                                ICmpInst::Predicate predicate) {
3506  if (LI->isVolatile()) return getCouldNotCompute();
3507
3508  // Check to see if the loaded pointer is a getelementptr of a global.
3509  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3510  if (!GEP) return getCouldNotCompute();
3511
3512  // Make sure that it is really a constant global we are gepping, with an
3513  // initializer, and make sure the first IDX is really 0.
3514  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3515  if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
3516      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3517      !cast<Constant>(GEP->getOperand(1))->isNullValue())
3518    return getCouldNotCompute();
3519
3520  // Okay, we allow one non-constant index into the GEP instruction.
3521  Value *VarIdx = 0;
3522  std::vector<ConstantInt*> Indexes;
3523  unsigned VarIdxNum = 0;
3524  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3525    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3526      Indexes.push_back(CI);
3527    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
3528      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
3529      VarIdx = GEP->getOperand(i);
3530      VarIdxNum = i-2;
3531      Indexes.push_back(0);
3532    }
3533
3534  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3535  // Check to see if X is a loop-variant value now.
3536  const SCEV *Idx = getSCEV(VarIdx);
3537  Idx = getSCEVAtScope(Idx, L);
3538
3539  // We can only recognize very limited forms of loop index expressions, in
3540  // particular, only affine AddRec's like {C1,+,C2}.
3541  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3542  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3543      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3544      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3545    return getCouldNotCompute();
3546
3547  unsigned MaxSteps = MaxBruteForceIterations;
3548  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3549    ConstantInt *ItCst = ConstantInt::get(
3550                           cast<IntegerType>(IdxExpr->getType()), IterationNum);
3551    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3552
3553    // Form the GEP offset.
3554    Indexes[VarIdxNum] = Val;
3555
3556    Constant *Result = GetAddressedElementFromGlobal(getContext(), GV, Indexes);
3557    if (Result == 0) break;  // Cannot compute!
3558
3559    // Evaluate the condition for this iteration.
3560    Result = ConstantExpr::getICmp(predicate, Result, RHS);
3561    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
3562    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3563#if 0
3564      errs() << "\n***\n*** Computed loop count " << *ItCst
3565             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3566             << "***\n";
3567#endif
3568      ++NumArrayLenItCounts;
3569      return getConstant(ItCst);   // Found terminating iteration!
3570    }
3571  }
3572  return getCouldNotCompute();
3573}
3574
3575
3576/// CanConstantFold - Return true if we can constant fold an instruction of the
3577/// specified type, assuming that all operands were constants.
3578static bool CanConstantFold(const Instruction *I) {
3579  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3580      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3581    return true;
3582
3583  if (const CallInst *CI = dyn_cast<CallInst>(I))
3584    if (const Function *F = CI->getCalledFunction())
3585      return canConstantFoldCallTo(F);
3586  return false;
3587}
3588
3589/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3590/// in the loop that V is derived from.  We allow arbitrary operations along the
3591/// way, but the operands of an operation must either be constants or a value
3592/// derived from a constant PHI.  If this expression does not fit with these
3593/// constraints, return null.
3594static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3595  // If this is not an instruction, or if this is an instruction outside of the
3596  // loop, it can't be derived from a loop PHI.
3597  Instruction *I = dyn_cast<Instruction>(V);
3598  if (I == 0 || !L->contains(I->getParent())) return 0;
3599
3600  if (PHINode *PN = dyn_cast<PHINode>(I)) {
3601    if (L->getHeader() == I->getParent())
3602      return PN;
3603    else
3604      // We don't currently keep track of the control flow needed to evaluate
3605      // PHIs, so we cannot handle PHIs inside of loops.
3606      return 0;
3607  }
3608
3609  // If we won't be able to constant fold this expression even if the operands
3610  // are constants, return early.
3611  if (!CanConstantFold(I)) return 0;
3612
3613  // Otherwise, we can evaluate this instruction if all of its operands are
3614  // constant or derived from a PHI node themselves.
3615  PHINode *PHI = 0;
3616  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3617    if (!(isa<Constant>(I->getOperand(Op)) ||
3618          isa<GlobalValue>(I->getOperand(Op)))) {
3619      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3620      if (P == 0) return 0;  // Not evolving from PHI
3621      if (PHI == 0)
3622        PHI = P;
3623      else if (PHI != P)
3624        return 0;  // Evolving from multiple different PHIs.
3625    }
3626
3627  // This is an expression evolving from a constant PHI!
3628  return PHI;
3629}
3630
3631/// EvaluateExpression - Given an expression that passes the
3632/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3633/// in the loop has the value PHIVal.  If we can't fold this expression for some
3634/// reason, return null.
3635static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
3636  if (isa<PHINode>(V)) return PHIVal;
3637  if (Constant *C = dyn_cast<Constant>(V)) return C;
3638  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3639  Instruction *I = cast<Instruction>(V);
3640  LLVMContext &Context = I->getParent()->getContext();
3641
3642  std::vector<Constant*> Operands;
3643  Operands.resize(I->getNumOperands());
3644
3645  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3646    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
3647    if (Operands[i] == 0) return 0;
3648  }
3649
3650  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3651    return ConstantFoldCompareInstOperands(CI->getPredicate(),
3652                                           &Operands[0], Operands.size(),
3653                                           Context);
3654  else
3655    return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3656                                    &Operands[0], Operands.size(),
3657                                    Context);
3658}
3659
3660/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
3661/// in the header of its containing loop, that the loop executes a
3662/// constant number of times, and that the PHI node is just a recurrence
3663/// involving constants, fold it.
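/// E.g. (illustrative): for "i = 5; do { i = i*3+1; } while (--n);" with
/// a known constant backedge-taken count, the exit value of i is obtained
/// by simply iterating the recurrence that many times (up to a cutoff).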
3664Constant *
3665ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
3666                                                   const APInt& BEs,
3667                                                   const Loop *L) {
3668  std::map<PHINode*, Constant*>::iterator I =
3669    ConstantEvolutionLoopExitValue.find(PN);
3670  if (I != ConstantEvolutionLoopExitValue.end())
3671    return I->second;
3672
3673  if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
3674    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
3675
3676  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
3677
3678  // Since the loop is canonicalized, the PHI node must have two entries.  One
3679  // entry must be a constant (coming in from outside of the loop), and the
3680  // second must be derived from the same PHI.
3681  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3682  Constant *StartCST =
3683    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3684  if (StartCST == 0)
3685    return RetVal = 0;  // Must be a constant.
3686
3687  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3688  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3689  if (PN2 != PN)
3690    return RetVal = 0;  // Not derived from same PHI.
3691
3692  // Execute the loop symbolically to determine the exit value.
3693  if (BEs.getActiveBits() >= 32)
3694    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
3695
3696  unsigned NumIterations = BEs.getZExtValue(); // must be in range
3697  unsigned IterationNum = 0;
3698  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
3699    if (IterationNum == NumIterations)
3700      return RetVal = PHIVal;  // Got exit value!
3701
3702    // Compute the value of the PHI node for the next iteration.
3703    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3704    if (NextPHI == PHIVal)
3705      return RetVal = NextPHI;  // Stopped evolving!
3706    if (NextPHI == 0)
3707      return 0;        // Couldn't evaluate!
3708    PHIVal = NextPHI;
3709  }
3710}
3711
3712/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
3713/// constant number of times (the condition evolves only from constants),
3714/// try to evaluate a few iterations of the loop until the exit condition
3715/// takes on the value ExitWhen (true or false).  If we cannot
3716/// evaluate the trip count of the loop, return getCouldNotCompute().
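/// E.g. (illustrative): "for (i = 3; i*i != 49; ++i)" has no closed form
/// here, but evaluating successive iterations shows the condition first
/// fails at i == 7, i.e. after 4 backedges.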
3717const SCEV *
3718ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
3719                                                       Value *Cond,
3720                                                       bool ExitWhen) {
3721  PHINode *PN = getConstantEvolvingPHI(Cond, L);
3722  if (PN == 0) return getCouldNotCompute();
3723
3724  // Since the loop is canonicalized, the PHI node must have two entries.  One
3725  // entry must be a constant (coming in from outside of the loop), and the
3726  // second must be derived from the same PHI.
3727  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3728  Constant *StartCST =
3729    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3730  if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
3731
3732  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3733  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3734  if (PN2 != PN) return getCouldNotCompute();  // Not derived from same PHI.
3735
3736  // Okay, we found a PHI node that defines the trip count of this loop.  Execute
3737  // the loop symbolically to determine when the condition gets a value of
3738  // "ExitWhen".
3739  unsigned IterationNum = 0;
3740  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
3741  for (Constant *PHIVal = StartCST;
3742       IterationNum != MaxIterations; ++IterationNum) {
3743    ConstantInt *CondVal =
3744      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
3745
3746    // Couldn't symbolically evaluate.
3747    if (!CondVal) return getCouldNotCompute();
3748
3749    if (CondVal->getValue() == uint64_t(ExitWhen)) {
3750      ++NumBruteForceTripCountsComputed;
3751      return getConstant(Type::Int32Ty, IterationNum);
3752    }
3753
3754    // Compute the value of the PHI node for the next iteration.
3755    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3756    if (NextPHI == 0 || NextPHI == PHIVal)
3757      return getCouldNotCompute(); // Couldn't evaluate or not making progress...
3758    PHIVal = NextPHI;
3759  }
3760
3761  // Too many iterations were needed to evaluate.
3762  return getCouldNotCompute();
3763}
3764
3765/// getSCEVAtScope - Return a SCEV expression handle for the specified value
3766/// at the specified scope in the program.  The L value specifies the loop
3767/// nest at which to evaluate the expression: null means the top level;
3768/// otherwise the value is the one seen immediately inside the given loop.
3769///
3770/// This method can be used to compute the exit value for a variable defined
3771/// in a loop by querying what the value will hold in the parent loop.
3772///
3773/// In the case that a relevant loop exit value cannot be computed, the
3774/// original value V is returned.
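///
/// E.g. (illustrative): for "for (i = 0; i != n; ++i) v = i*2;", querying
/// v at the parent loop's scope yields 2*(n-1) when the backedge-taken
/// count n-1 is computable.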
3775const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
3776  // FIXME: this should be turned into a virtual method on SCEV!
3777
3778  if (isa<SCEVConstant>(V)) return V;
3779
3780  // If this instruction is evolved from a constant-evolving PHI, compute the
3781  // exit value from the loop without using SCEVs.
3782  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
3783    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
3784      const Loop *LI = (*this->LI)[I->getParent()];
3785      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
3786        if (PHINode *PN = dyn_cast<PHINode>(I))
3787          if (PN->getParent() == LI->getHeader()) {
3788            // Okay, there is no closed form solution for the PHI node.  Check
3789            // to see if the loop that contains it has a known backedge-taken
3790            // count.  If so, we may be able to force computation of the exit
3791            // value.
3792            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
3793            if (const SCEVConstant *BTCC =
3794                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
3795              // Okay, we know how many times the containing loop executes.  If
3796              // this is a constant evolving PHI node, get the final value at
3797              // the specified iteration number.
3798              Constant *RV = getConstantEvolutionLoopExitValue(PN,
3799                                                   BTCC->getValue()->getValue(),
3800                                                               LI);
3801              if (RV) return getSCEV(RV);
3802            }
3803          }
3804
3805      // Okay, this is an expression that we cannot symbolically evaluate
3806      // into a SCEV.  Check to see if it's possible to symbolically evaluate
3807      // the arguments into constants, and if so, try to constant propagate the
3808      // result.  This is particularly useful for computing loop exit values.
3809      if (CanConstantFold(I)) {
3810        // Check to see if we've folded this instruction at this loop before.
3811        std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
3812        std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
3813          Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
3814        if (!Pair.second)
3815          return Pair.first->second ? &*getSCEV(Pair.first->second) : V;
3816
3817        std::vector<Constant*> Operands;
3818        Operands.reserve(I->getNumOperands());
3819        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3820          Value *Op = I->getOperand(i);
3821          if (Constant *C = dyn_cast<Constant>(Op)) {
3822            Operands.push_back(C);
3823          } else {
3824            // If an operand is non-constant and its type is neither
3825            // integer nor pointer, don't even try to analyze it with
3826            // SCEV techniques.
3827            if (!isSCEVable(Op->getType()))
3828              return V;
3829
3830            const SCEV *OpV = getSCEVAtScope(Op, L);
3831            if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
3832              Constant *C = SC->getValue();
3833              if (C->getType() != Op->getType())
3834                C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3835                                                                  Op->getType(),
3836                                                                  false),
3837                                          C, Op->getType());
3838              Operands.push_back(C);
3839            } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
3840              if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
3841                if (C->getType() != Op->getType())
3842                  C =
3843                    ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3844                                                                  Op->getType(),
3845                                                                  false),
3846                                          C, Op->getType());
3847                Operands.push_back(C);
3848              } else
3849                return V;
3850            } else {
3851              return V;
3852            }
3853          }
3854        }
3855
3856        Constant *C;
3857        if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3858          C = ConstantFoldCompareInstOperands(CI->getPredicate(),
3859                                              &Operands[0], Operands.size(),
3860                                              getContext());
3861        else
3862          C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3863                                       &Operands[0], Operands.size(),
3864                                       getContext());
3865        Pair.first->second = C;
3866        return getSCEV(C);
3867      }
3868    }
3869
3870    // This is some other type of SCEVUnknown, just return it.
3871    return V;
3872  }
3873
3874  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
3875    // Avoid performing the look-up in the common case where the specified
3876    // expression has no loop-variant portions.
3877    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
3878      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3879      if (OpAtScope != Comm->getOperand(i)) {
3880        // Okay, at least one of these operands is loop variant but might be
3881        // foldable.  Build a new instance of the folded commutative expression.
3882        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
3883                                            Comm->op_begin()+i);
3884        NewOps.push_back(OpAtScope);
3885
3886        for (++i; i != e; ++i) {
3887          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3888          NewOps.push_back(OpAtScope);
3889        }
3890        if (isa<SCEVAddExpr>(Comm))
3891          return getAddExpr(NewOps);
3892        if (isa<SCEVMulExpr>(Comm))
3893          return getMulExpr(NewOps);
3894        if (isa<SCEVSMaxExpr>(Comm))
3895          return getSMaxExpr(NewOps);
3896        if (isa<SCEVUMaxExpr>(Comm))
3897          return getUMaxExpr(NewOps);
3898        llvm_unreachable("Unknown commutative SCEV type!");
3899      }
3900    }
3901    // If we got here, all operands are loop invariant.
3902    return Comm;
3903  }
3904
3905  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
3906    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
3907    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
3908    if (LHS == Div->getLHS() && RHS == Div->getRHS())
3909      return Div;   // must be loop invariant
3910    return getUDivExpr(LHS, RHS);
3911  }
3912
3913  // If this is a loop recurrence for a loop that does not contain L, then we
3914  // are dealing with the final value computed by the loop.
3915  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
3916    if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
3917      // To evaluate this recurrence, we need to know how many times the AddRec
3918      // loop iterates.  Compute this now.
3919      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
3920      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
3921
3922      // Then, evaluate the AddRec.
3923      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
3924    }
3925    return AddRec;
3926  }
3927
3928  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
3929    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3930    if (Op == Cast->getOperand())
3931      return Cast;  // must be loop invariant
3932    return getZeroExtendExpr(Op, Cast->getType());
3933  }
3934
3935  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
3936    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3937    if (Op == Cast->getOperand())
3938      return Cast;  // must be loop invariant
3939    return getSignExtendExpr(Op, Cast->getType());
3940  }
3941
3942  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
3943    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3944    if (Op == Cast->getOperand())
3945      return Cast;  // must be loop invariant
3946    return getTruncateExpr(Op, Cast->getType());
3947  }
3948
3949  llvm_unreachable("Unknown SCEV type!");
3950  return 0;
3951}
3952
3953/// getSCEVAtScope - This is a convenience function which does
3954/// getSCEVAtScope(getSCEV(V), L).
3955const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
3956  return getSCEVAtScope(getSCEV(V), L);
3957}
3958
3959/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
3960/// following equation:
3961///
3962///     A * X = B (mod N)
3963///
3964/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
3965/// A and B isn't important.
3966///
3967/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
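///
/// Worked example: with BW = 4 (N = 16), A = 6, and B = 10, we get
/// D = gcd(6, 16) = 2 and B is divisible by D, so AD = 3, Mod = 8, and
/// I = 3 (since 3 * 3 = 9 = 1 (mod 8)).  The minimum root is then
/// (3 * 5) mod 8 = 7, and indeed 6 * 7 = 42 = 10 (mod 16).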
3968static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
3969                                               ScalarEvolution &SE) {
3970  uint32_t BW = A.getBitWidth();
3971  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
3972  assert(A != 0 && "A must be non-zero.");
3973
3974  // 1. D = gcd(A, N)
3975  //
3976  // The gcd of A and N may have only one prime factor: 2. The number of
3977  // trailing zeros in A is its multiplicity.
3978  uint32_t Mult2 = A.countTrailingZeros();
3979  // D = 2^Mult2
3980
3981  // 2. Check if B is divisible by D.
3982  //
3983  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
3984  // is not less than multiplicity of this prime factor for D.
3985  if (B.countTrailingZeros() < Mult2)
3986    return SE.getCouldNotCompute();
3987
3988  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
3989  // modulo (N / D).
3990  //
3991  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
3992  // bit width during computations.
3993  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
3994  APInt Mod(BW + 1, 0);
3995  Mod.set(BW - Mult2);  // Mod = N / D
3996  APInt I = AD.multiplicativeInverse(Mod);
3997
3998  // 4. Compute the minimum unsigned root of the equation:
3999  // I * (B / D) mod (N / D)
4000  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
4001
4002  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
4003  // bits.
4004  return SE.getConstant(Result.trunc(BW));
4005}
4006
4007/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
4008/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
4009/// might be the same) or two SCEVCouldNotCompute objects.
4010///
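/// The chrec {L,+,M,+,N} has the closed form
///   f(x) = L + M*x + N*x*(x-1)/2,
/// so in standard AX^2 + BX + C form, A = N/2, B = M - N/2, and C = L; the
/// roots then follow from the quadratic formula, as computed below.
///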
4011static std::pair<const SCEV *,const SCEV *>
4012SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
4013  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
4014  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
4015  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
4016  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
4017
4018  // We currently can only solve this if the coefficients are constants.
4019  if (!LC || !MC || !NC) {
4020    const SCEV *CNC = SE.getCouldNotCompute();
4021    return std::make_pair(CNC, CNC);
4022  }
4023
4024  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
4025  const APInt &L = LC->getValue()->getValue();
4026  const APInt &M = MC->getValue()->getValue();
4027  const APInt &N = NC->getValue()->getValue();
4028  APInt Two(BitWidth, 2);
4029  APInt Four(BitWidth, 4);
4030
4031  {
4032    using namespace APIntOps;
4033    const APInt& C = L;
4034    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
4035    // The B coefficient is M-N/2
4036    APInt B(M);
4037    B -= sdiv(N,Two);
4038
4039    // The A coefficient is N/2
4040    APInt A(N.sdiv(Two));
4041
4042    // Compute the B^2-4ac term.
4043    APInt SqrtTerm(B);
4044    SqrtTerm *= B;
4045    SqrtTerm -= Four * (A * C);
4046
4047    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4048    // integer value or else APInt::sqrt() will assert.
4049    APInt SqrtVal(SqrtTerm.sqrt());
4050
4051    // Compute the two solutions for the quadratic formula.
4052    // The divisions must be performed as signed divisions.
4053    APInt NegB(-B);
4054    APInt TwoA(A << 1);
4055    if (TwoA.isMinValue()) {
4056      const SCEV *CNC = SE.getCouldNotCompute();
4057      return std::make_pair(CNC, CNC);
4058    }
4059
4060    LLVMContext &Context = SE.getContext();
4061
4062    ConstantInt *Solution1 =
4063      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
4064    ConstantInt *Solution2 =
4065      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
4066
4067    return std::make_pair(SE.getConstant(Solution1),
4068                          SE.getConstant(Solution2));
4069  } // end local scope using APIntOps
4070}
4071
4072/// HowFarToZero - Return the number of times a backedge comparing the specified
4073/// value to zero will execute.  If not computable, return CouldNotCompute.
4074const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
4075  // If the value is a constant
4076  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4077    // If the value is already zero, the branch will execute zero times.
4078    if (C->getValue()->isZero()) return C;
4079    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4080  }
4081
4082  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4083  if (!AddRec || AddRec->getLoop() != L)
4084    return getCouldNotCompute();
4085
4086  if (AddRec->isAffine()) {
4087    // If this is an affine expression, the execution count of this branch is
4088    // the minimum unsigned root of the following equation:
4089    //
4090    //     Start + Step*N = 0 (mod 2^BW)
4091    //
4092    // equivalent to:
4093    //
4094    //             Step*N = -Start (mod 2^BW)
4095    //
4096    // where BW is the common bit width of Start and Step.
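    //
    // For example, {3,+,2} in 4 bits requires solving 2*N = -3 = 13 (mod 16);
    // 13 is odd while the step is even, so there is no solution: the
    // recurrence takes only odd values and never reaches zero.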
4097
4098    // Get the initial value for the loop.
4099    const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4100                                       L->getParentLoop());
4101    const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4102                                      L->getParentLoop());
4103
4104    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4105      // For now we handle only constant steps.
4106
4107      // First, handle unitary steps.
4108      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
4109        return getNegativeSCEV(Start);       //   N = -Start (as unsigned)
4110      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
4111        return Start;                           //    N = Start (as unsigned)
4112
4113      // Then, try to solve the above equation provided that Start is constant.
4114      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4115        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4116                                            -StartC->getValue()->getValue(),
4117                                            *this);
4118    }
4119  } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
4120    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4121    // the quadratic equation to solve it.
4122    std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
4123                                                                    *this);
4124    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4125    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4126    if (R1) {
4127#if 0
4128      errs() << "HFTZ: " << *V << " - sol#1: " << *R1
4129             << "  sol#2: " << *R2 << "\n";
4130#endif
4131      // Pick the smallest positive root value.
4132      if (ConstantInt *CB =
4133          dyn_cast<ConstantInt>(getContext().getConstantExprICmp(ICmpInst::ICMP_ULT,
4134                                   R1->getValue(), R2->getValue()))) {
4135        if (!CB->getZExtValue())
4136          std::swap(R1, R2);   // R1 is the minimum root now.
4137
4138        // We can only use this value if the chrec ends up with an exact zero
4139        // value at this index.  When solving for "X*X != 5", for example, we
4140        // should not accept a root of 2.
4141        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4142        if (Val->isZero())
4143          return R1;  // We found a quadratic root!
4144      }
4145    }
4146  }
4147
4148  return getCouldNotCompute();
4149}
4150
4151/// HowFarToNonZero - Return the number of times a backedge checking the
4152/// specified value for nonzero will execute.  If not computable, return
4153/// CouldNotCompute.
4154const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4155  // Loops that look like: while (X == 0) are very strange indeed.  We don't
4156  // handle them yet except for the trivial case.  This could be expanded in the
4157  // future as needed.
4158
4159  // If the value is a constant, check to see if it is known to be non-zero
4160  // already.  If so, the backedge will execute zero times.
4161  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4162    if (!C->getValue()->isNullValue())
4163      return getIntegerSCEV(0, C->getType());
4164    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4165  }
4166
4167  // We could implement others, but I really doubt anyone writes loops like
4168  // this, and if they did, they would already be constant folded.
4169  return getCouldNotCompute();
4170}
4171
4172/// getLoopPredecessor - If the given loop's header has exactly one unique
4173/// predecessor outside the loop, return it. Otherwise return null.
4174///
4175BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
4176  BasicBlock *Header = L->getHeader();
4177  BasicBlock *Pred = 0;
4178  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
4179       PI != E; ++PI)
4180    if (!L->contains(*PI)) {
4181      if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4182      Pred = *PI;
4183    }
4184  return Pred;
4185}
4186
4187/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4188/// (which may not be an immediate predecessor) which has exactly one
4189/// successor from which BB is reachable, or null if no such block is
4190/// found.
4191///
4192BasicBlock *
4193ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4194  // If the block has a unique predecessor, then there is no path from the
4195  // predecessor to the block that does not go through the direct edge
4196  // from the predecessor to the block.
4197  if (BasicBlock *Pred = BB->getSinglePredecessor())
4198    return Pred;
4199
4200  // A loop's header is defined to be a block that dominates the loop.
4201  // If the header has a unique predecessor outside the loop, it must be
4202  // a block that has exactly one successor that can reach the loop.
4203  if (Loop *L = LI->getLoopFor(BB))
4204    return getLoopPredecessor(L);
4205
4206  return 0;
4207}
4208
4209/// HasSameValue - SCEV structural equivalence is usually sufficient for
4210/// testing whether two expressions are equal; however, for the purposes of
4211/// looking for a condition guarding a loop, it can be useful to be a little
4212/// more general, since a front-end may have replicated the controlling
4213/// expression.
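///
/// For example, a front-end may compute the same limit expression with two
/// identical instructions, one before the loop and one inside it; these map
/// to distinct SCEVUnknowns but are recognized as equal here.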
4214///
4215static bool HasSameValue(const SCEV *A, const SCEV *B) {
4216  // Quick check to see if they are the same SCEV.
4217  if (A == B) return true;
4218
4219  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4220  // two different instructions with the same value. Check for this case.
4221  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4222    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4223      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4224        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4225          if (AI->isIdenticalTo(BI))
4226            return true;
4227
4228  // Otherwise assume they may have a different value.
4229  return false;
4230}
4231
4232bool ScalarEvolution::isKnownNegative(const SCEV *S) {
4233  return getSignedRange(S).getSignedMax().isNegative();
4234}
4235
4236bool ScalarEvolution::isKnownPositive(const SCEV *S) {
4237  return getSignedRange(S).getSignedMin().isStrictlyPositive();
4238}
4239
4240bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
4241  return !getSignedRange(S).getSignedMin().isNegative();
4242}
4243
4244bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
4245  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
4246}
4247
4248bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
4249  return isKnownNegative(S) || isKnownPositive(S);
4250}
4251
4252bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
4253                                       const SCEV *LHS, const SCEV *RHS) {
4254
4255  if (HasSameValue(LHS, RHS))
4256    return ICmpInst::isTrueWhenEqual(Pred);
4257
4258  switch (Pred) {
4259  default:
4260    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4261    break;
4262  case ICmpInst::ICMP_SGT:
4263    Pred = ICmpInst::ICMP_SLT;
4264    std::swap(LHS, RHS);
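    // FALL THROUGH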
4265  case ICmpInst::ICMP_SLT: {
4266    ConstantRange LHSRange = getSignedRange(LHS);
4267    ConstantRange RHSRange = getSignedRange(RHS);
4268    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
4269      return true;
4270    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
4271      return false;
4272    break;
4273  }
4274  case ICmpInst::ICMP_SGE:
4275    Pred = ICmpInst::ICMP_SLE;
4276    std::swap(LHS, RHS);
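    // FALL THROUGH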
4277  case ICmpInst::ICMP_SLE: {
4278    ConstantRange LHSRange = getSignedRange(LHS);
4279    ConstantRange RHSRange = getSignedRange(RHS);
4280    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
4281      return true;
4282    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
4283      return false;
4284    break;
4285  }
4286  case ICmpInst::ICMP_UGT:
4287    Pred = ICmpInst::ICMP_ULT;
4288    std::swap(LHS, RHS);
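    // FALL THROUGH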
4289  case ICmpInst::ICMP_ULT: {
4290    ConstantRange LHSRange = getUnsignedRange(LHS);
4291    ConstantRange RHSRange = getUnsignedRange(RHS);
4292    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
4293      return true;
4294    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
4295      return false;
4296    break;
4297  }
4298  case ICmpInst::ICMP_UGE:
4299    Pred = ICmpInst::ICMP_ULE;
4300    std::swap(LHS, RHS);
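    // FALL THROUGH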
4301  case ICmpInst::ICMP_ULE: {
4302    ConstantRange LHSRange = getUnsignedRange(LHS);
4303    ConstantRange RHSRange = getUnsignedRange(RHS);
4304    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
4305      return true;
4306    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
4307      return false;
4308    break;
4309  }
4310  case ICmpInst::ICMP_NE: {
4311    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
4312      return true;
4313    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
4314      return true;
4315
4316    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4317    if (isKnownNonZero(Diff))
4318      return true;
4319    break;
4320  }
4321  case ICmpInst::ICMP_EQ:
4322    // The check at the top of the function catches the case where
4323    // the values are known to be equal.
4324    break;
4325  }
4326  return false;
4327}
4328
4329/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
4330/// protected by a conditional between LHS and RHS.  This is used to
4331/// eliminate casts.
4332bool
4333ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
4334                                             ICmpInst::Predicate Pred,
4335                                             const SCEV *LHS, const SCEV *RHS) {
4336  // Interpret a null as meaning no loop, where there is obviously no guard
4337  // (interprocedural conditions notwithstanding).
4338  if (!L) return true;
4339
4340  BasicBlock *Latch = L->getLoopLatch();
4341  if (!Latch)
4342    return false;
4343
4344  BranchInst *LoopContinuePredicate =
4345    dyn_cast<BranchInst>(Latch->getTerminator());
4346  if (!LoopContinuePredicate ||
4347      LoopContinuePredicate->isUnconditional())
4348    return false;
4349
4350  return isImpliedCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
4351                       LoopContinuePredicate->getSuccessor(0) != L->getHeader());
4352}
4353
4354/// isLoopGuardedByCond - Test whether entry to the loop is protected
4355/// by a conditional between LHS and RHS.  This is used to help avoid max
4356/// expressions in loop trip counts, and to eliminate casts.
4357bool
4358ScalarEvolution::isLoopGuardedByCond(const Loop *L,
4359                                     ICmpInst::Predicate Pred,
4360                                     const SCEV *LHS, const SCEV *RHS) {
4361  // Interpret a null as meaning no loop, where there is obviously no guard
4362  // (interprocedural conditions notwithstanding).
4363  if (!L) return false;
4364
4365  BasicBlock *Predecessor = getLoopPredecessor(L);
4366  BasicBlock *PredecessorDest = L->getHeader();
4367
4368  // Starting at the loop predecessor, climb up the predecessor chain, as long
4369  // as there are predecessors that can be found that have unique successors
4370  // leading to the original header.
4371  for (; Predecessor;
4372       PredecessorDest = Predecessor,
4373       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
4374
4375    BranchInst *LoopEntryPredicate =
4376      dyn_cast<BranchInst>(Predecessor->getTerminator());
4377    if (!LoopEntryPredicate ||
4378        LoopEntryPredicate->isUnconditional())
4379      continue;
4380
4381    if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
4382                      LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
4383      return true;
4384  }
4385
4386  return false;
4387}
4388
4389/// isImpliedCond - Test whether the condition described by Pred, LHS,
4390/// and RHS is true whenever the given Cond value evaluates to true.
4391bool ScalarEvolution::isImpliedCond(Value *CondValue,
4392                                    ICmpInst::Predicate Pred,
4393                                    const SCEV *LHS, const SCEV *RHS,
4394                                    bool Inverse) {
4395  // Recursively handle And and Or conditions.
4396  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
4397    if (BO->getOpcode() == Instruction::And) {
4398      if (!Inverse)
4399        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4400               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4401    } else if (BO->getOpcode() == Instruction::Or) {
4402      if (Inverse)
4403        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4404               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4405    }
4406  }
4407
4408  ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
4409  if (!ICI) return false;
4410
4411  // Bail if the ICmp's operands' types are wider than the needed type
4412  // before attempting to call getSCEV on them. This avoids infinite
4413  // recursion, since the analysis of widening casts can require loop
4414  // exit condition information for overflow checking, which would
4415  // lead back here.
4416  if (getTypeSizeInBits(LHS->getType()) <
4417      getTypeSizeInBits(ICI->getOperand(0)->getType()))
4418    return false;
4419
4420  // Now that we've found a conditional branch that dominates the loop, check to
4421  // see if it is the comparison we are looking for.
4422  ICmpInst::Predicate FoundPred;
4423  if (Inverse)
4424    FoundPred = ICI->getInversePredicate();
4425  else
4426    FoundPred = ICI->getPredicate();
4427
4428  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
4429  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
4430
4431  // Balance the types. The case where FoundLHS' type is wider than
4432  // LHS' type is checked for above.
4433  if (getTypeSizeInBits(LHS->getType()) >
4434      getTypeSizeInBits(FoundLHS->getType())) {
4435    if (CmpInst::isSigned(Pred)) {
4436      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
4437      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
4438    } else {
4439      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
4440      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
4441    }
4442  }
4443
4444  // Canonicalize the query to match the way instcombine will have
4445  // canonicalized the comparison.
4446  // First, put a constant operand on the right.
4447  if (isa<SCEVConstant>(LHS)) {
4448    std::swap(LHS, RHS);
4449    Pred = ICmpInst::getSwappedPredicate(Pred);
4450  }
4451  // Then, canonicalize comparisons with boundary cases.
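  // For example, "x u>= 1" becomes "x != 0", and "x u<= UINT_MAX-1" becomes
  // "x != UINT_MAX"; this reduces the number of distinct forms that must be
  // matched below.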
4452  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
4453    const APInt &RA = RC->getValue()->getValue();
4454    switch (Pred) {
4455    default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4456    case ICmpInst::ICMP_EQ:
4457    case ICmpInst::ICMP_NE:
4458      break;
4459    case ICmpInst::ICMP_UGE:
4460      if ((RA - 1).isMinValue()) {
4461        Pred = ICmpInst::ICMP_NE;
4462        RHS = getConstant(RA - 1);
4463        break;
4464      }
4465      if (RA.isMaxValue()) {
4466        Pred = ICmpInst::ICMP_EQ;
4467        break;
4468      }
4469      if (RA.isMinValue()) return true;
4470      break;
4471    case ICmpInst::ICMP_ULE:
4472      if ((RA + 1).isMaxValue()) {
4473        Pred = ICmpInst::ICMP_NE;
4474        RHS = getConstant(RA + 1);
4475        break;
4476      }
4477      if (RA.isMinValue()) {
4478        Pred = ICmpInst::ICMP_EQ;
4479        break;
4480      }
4481      if (RA.isMaxValue()) return true;
4482      break;
4483    case ICmpInst::ICMP_SGE:
4484      if ((RA - 1).isMinSignedValue()) {
4485        Pred = ICmpInst::ICMP_NE;
4486        RHS = getConstant(RA - 1);
4487        break;
4488      }
4489      if (RA.isMaxSignedValue()) {
4490        Pred = ICmpInst::ICMP_EQ;
4491        break;
4492      }
4493      if (RA.isMinSignedValue()) return true;
4494      break;
4495    case ICmpInst::ICMP_SLE:
4496      if ((RA + 1).isMaxSignedValue()) {
4497        Pred = ICmpInst::ICMP_NE;
4498        RHS = getConstant(RA + 1);
4499        break;
4500      }
4501      if (RA.isMinSignedValue()) {
4502        Pred = ICmpInst::ICMP_EQ;
4503        break;
4504      }
4505      if (RA.isMaxSignedValue()) return true;
4506      break;
4507    case ICmpInst::ICMP_UGT:
4508      if (RA.isMinValue()) {
4509        Pred = ICmpInst::ICMP_NE;
4510        break;
4511      }
4512      if ((RA + 1).isMaxValue()) {
4513        Pred = ICmpInst::ICMP_EQ;
4514        RHS = getConstant(RA + 1);
4515        break;
4516      }
4517      if (RA.isMaxValue()) return false;
4518      break;
4519    case ICmpInst::ICMP_ULT:
4520      if (RA.isMaxValue()) {
4521        Pred = ICmpInst::ICMP_NE;
4522        break;
4523      }
4524      if ((RA - 1).isMinValue()) {
4525        Pred = ICmpInst::ICMP_EQ;
4526        RHS = getConstant(RA - 1);
4527        break;
4528      }
4529      if (RA.isMinValue()) return false;
4530      break;
4531    case ICmpInst::ICMP_SGT:
4532      if (RA.isMinSignedValue()) {
4533        Pred = ICmpInst::ICMP_NE;
4534        break;
4535      }
4536      if ((RA + 1).isMaxSignedValue()) {
4537        Pred = ICmpInst::ICMP_EQ;
4538        RHS = getConstant(RA + 1);
4539        break;
4540      }
4541      if (RA.isMaxSignedValue()) return false;
4542      break;
4543    case ICmpInst::ICMP_SLT:
4544      if (RA.isMaxSignedValue()) {
4545        Pred = ICmpInst::ICMP_NE;
4546        break;
4547      }
4548      if ((RA - 1).isMinSignedValue()) {
4549        Pred = ICmpInst::ICMP_EQ;
4550        RHS = getConstant(RA - 1);
4551        break;
4552      }
4553      if (RA.isMinSignedValue()) return false;
4554      break;
4555    }
4556  }
4557
4558  // Check to see if we can make the LHS or RHS match.
4559  if (LHS == FoundRHS || RHS == FoundLHS) {
4560    if (isa<SCEVConstant>(RHS)) {
4561      std::swap(FoundLHS, FoundRHS);
4562      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
4563    } else {
4564      std::swap(LHS, RHS);
4565      Pred = ICmpInst::getSwappedPredicate(Pred);
4566    }
4567  }
4568
4569  // Check whether the found predicate is the same as the desired predicate.
4570  if (FoundPred == Pred)
4571    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
4572
4573  // Check whether swapping the found predicate makes it the same as the
4574  // desired predicate.
4575  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
4576    if (isa<SCEVConstant>(RHS))
4577      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
4578    else
4579      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
4580                                   RHS, LHS, FoundLHS, FoundRHS);
4581  }
4582
4583  // Check whether the actual condition is beyond sufficient.
4584  if (FoundPred == ICmpInst::ICMP_EQ)
4585    if (ICmpInst::isTrueWhenEqual(Pred))
4586      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
4587        return true;
4588  if (Pred == ICmpInst::ICMP_NE)
4589    if (!ICmpInst::isTrueWhenEqual(FoundPred))
4590      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
4591        return true;
4592
4593  // Otherwise assume the worst.
4594  return false;
4595}
4596
4597/// isImpliedCondOperands - Test whether the condition described by Pred,
4598/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
4599/// and FoundRHS is true.
4600bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
4601                                            const SCEV *LHS, const SCEV *RHS,
4602                                            const SCEV *FoundLHS,
4603                                            const SCEV *FoundRHS) {
4604  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
4605                                     FoundLHS, FoundRHS) ||
4606         // ~x < ~y --> x > y
4607         isImpliedCondOperandsHelper(Pred, LHS, RHS,
4608                                     getNotSCEV(FoundRHS),
4609                                     getNotSCEV(FoundLHS));
4610}
4611
4612/// isImpliedCondOperandsHelper - Test whether the condition described by
4613/// Pred, LHS, and RHS is true whenever the condition described by Pred,
4614/// FoundLHS, and FoundRHS is true.
4615bool
4616ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
4617                                             const SCEV *LHS, const SCEV *RHS,
4618                                             const SCEV *FoundLHS,
4619                                             const SCEV *FoundRHS) {
4620  switch (Pred) {
4621  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4622  case ICmpInst::ICMP_EQ:
4623  case ICmpInst::ICMP_NE:
4624    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
4625      return true;
4626    break;
4627  case ICmpInst::ICMP_SLT:
4628  case ICmpInst::ICMP_SLE:
4629    if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
4630        isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
4631      return true;
4632    break;
4633  case ICmpInst::ICMP_SGT:
4634  case ICmpInst::ICMP_SGE:
4635    if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
4636        isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
4637      return true;
4638    break;
4639  case ICmpInst::ICMP_ULT:
4640  case ICmpInst::ICMP_ULE:
4641    if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
4642        isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
4643      return true;
4644    break;
4645  case ICmpInst::ICMP_UGT:
4646  case ICmpInst::ICMP_UGE:
4647    if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
4648        isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
4649      return true;
4650    break;
4651  }
4652
4653  return false;
4654}
4655
4656/// getBECount - Subtract the end and start values and divide by the step,
4657/// rounding up, to get the number of times the backedge is executed. Return
4658/// CouldNotCompute if an intermediate computation overflows.
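///
/// For example, Start = 0, End = 10, and Step = 3 give
/// (10 - 0 + (3 - 1)) / 3 = 4, the rounded-up quotient.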
4659const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
4660                                        const SCEV *End,
4661                                        const SCEV *Step) {
4662  const Type *Ty = Start->getType();
4663  const SCEV *NegOne = getIntegerSCEV(-1, Ty);
4664  const SCEV *Diff = getMinusSCEV(End, Start);
4665  const SCEV *RoundUp = getAddExpr(Step, NegOne);
4666
4667  // Add an adjustment to the difference between End and Start so that
4668  // the division will effectively round up.
4669  const SCEV *Add = getAddExpr(Diff, RoundUp);
4670
4671  // Check Add for unsigned overflow.
4672  // TODO: More sophisticated things could be done here.
4673  const Type *WideTy = getContext().getIntegerType(getTypeSizeInBits(Ty) + 1);
4674  const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
4675  const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
4676  const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
4677  if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
4678    return getCouldNotCompute();
4679
4680  return getUDivExpr(Add, Step);
4681}
4682
4683/// HowManyLessThans - Return the number of times a backedge containing the
4684/// specified less-than comparison will execute.  If not computable, return
4685/// CouldNotCompute.
4686ScalarEvolution::BackedgeTakenInfo
4687ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
4688                                  const Loop *L, bool isSigned) {
4689  // Only handle:  "ADDREC < LoopInvariant".
4690  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
4691
4692  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
4693  if (!AddRec || AddRec->getLoop() != L)
4694    return getCouldNotCompute();
4695
4696  if (AddRec->isAffine()) {
4697    // FORNOW: We only support constant positive strides.
4698    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
4699    const SCEV *Step = AddRec->getStepRecurrence(*this);
4700
4701    // TODO: handle non-constant strides.
4702    const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
4703    if (!CStep || CStep->isZero())
4704      return getCouldNotCompute();
4705    if (CStep->isOne()) {
4706      // With unit stride, the iteration never steps past the limit value.
4707    } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
4708      if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
4709        // Test whether a positive iteration can step past the limit
4710        // value and past the maximum value for its type in a single step.
4711        if (isSigned) {
4712          APInt Max = APInt::getSignedMaxValue(BitWidth);
4713          if ((Max - CStep->getValue()->getValue())
4714                .slt(CLimit->getValue()->getValue()))
4715            return getCouldNotCompute();
4716        } else {
4717          APInt Max = APInt::getMaxValue(BitWidth);
4718          if ((Max - CStep->getValue()->getValue())
4719                .ult(CLimit->getValue()->getValue()))
4720            return getCouldNotCompute();
4721        }
4722      } else
4723        // TODO: handle non-constant limit values below.
4724        return getCouldNotCompute();
4725    } else
4726      // TODO: handle negative strides below.
4727      return getCouldNotCompute();
4728
4729    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
4730    // m.  So, we count the number of iterations in which {n,+,s} < m is true.
4731    // Note that we cannot simply return max(m-n,0)/s because it's not safe to
4732    // treat m-n as signed nor unsigned due to overflow possibility.
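    //
    // For example, with 8-bit unsigned values, n = 250 and m = 10: the loop
    // body never runs, yet m-n wraps to 16, which with s = 1 would suggest
    // 16 iterations.  Using max(m,n) = 250 as the end value gives the
    // correct count of 0.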
4733
4734    // First, we get the value of the LHS in the first iteration: n
4735    const SCEV *Start = AddRec->getOperand(0);
4736
4737    // Determine the minimum constant start value.
4738    const SCEV *MinStart = getConstant(isSigned ?
4739      getSignedRange(Start).getSignedMin() :
4740      getUnsignedRange(Start).getUnsignedMin());
4741
4742    // If we know that the condition is true in order to enter the loop,
4743    // then we know that it will run exactly (m-n)/s times. Otherwise, we
4744    // only know that it will execute (max(m,n)-n)/s times. In both cases,
4745    // the division must round up.
4746    const SCEV *End = RHS;
4747    if (!isLoopGuardedByCond(L,
4748                             isSigned ? ICmpInst::ICMP_SLT :
4749                                        ICmpInst::ICMP_ULT,
4750                             getMinusSCEV(Start, Step), RHS))
4751      End = isSigned ? getSMaxExpr(RHS, Start)
4752                     : getUMaxExpr(RHS, Start);
4753
4754    // Determine the maximum constant end value.
4755    const SCEV *MaxEnd = getConstant(isSigned ?
4756      getSignedRange(End).getSignedMax() :
4757      getUnsignedRange(End).getUnsignedMax());
4758
4759    // Finally, we subtract these two values and divide, rounding up, to get
4760    // the number of times the backedge is executed.
4761    const SCEV *BECount = getBECount(Start, End, Step);
4762
4763    // The maximum backedge count is similar, except using the minimum start
4764    // value and the maximum end value.
4765    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step);
4766
4767    return BackedgeTakenInfo(BECount, MaxBECount);
4768  }
4769
4770  return getCouldNotCompute();
4771}
4772
4773/// getNumIterationsInRange - Return the number of iterations of this loop that
4774/// produce values in the specified constant range.  Another way of looking at
4775/// this is that it returns the first iteration number where the value is not in
4776/// the condition, thus computing the exit count. If the iteration count can't
4777/// be computed, an instance of SCEVCouldNotCompute is returned.
4778const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
4779                                                    ScalarEvolution &SE) const {
4780  if (Range.isFullSet())  // Infinite loop.
4781    return SE.getCouldNotCompute();
4782
4783  // If the start is a non-zero constant, shift the range to simplify things.
4784  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
4785    if (!SC->getValue()->isZero()) {
4786      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
4787      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
4788      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
4789      if (const SCEVAddRecExpr *ShiftedAddRec =
4790            dyn_cast<SCEVAddRecExpr>(Shifted))
4791        return ShiftedAddRec->getNumIterationsInRange(
4792                           Range.subtract(SC->getValue()->getValue()), SE);
4793      // This is strange and shouldn't happen.
4794      return SE.getCouldNotCompute();
4795    }
4796
4797  // The only time we can solve this is when we have all constant indices.
4798  // Otherwise, we cannot determine the overflow conditions.
4799  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
4800    if (!isa<SCEVConstant>(getOperand(i)))
4801      return SE.getCouldNotCompute();
4802
4803
4804  // Okay, at this point we know that all elements of the chrec are
4805  // constants and that the start element is zero.
4806
4807  // First check to see if the range contains zero.  If not, the first
4808  // iteration exits.
4809  unsigned BitWidth = SE.getTypeSizeInBits(getType());
4810  if (!Range.contains(APInt(BitWidth, 0)))
4811    return SE.getIntegerSCEV(0, getType());
4812
4813  if (isAffine()) {
4814    // If this is an affine expression then we have this situation:
4815    //   Solve {0,+,A} in Range  ===  Ax in Range
4816
4817    // We know that zero is in the range.  If A is positive then we know that
4818    // the upper value of the range must be the first possible exit value.
4819    // If A is negative then the lower of the range is the last possible loop
4820    // value.  Also note that we already checked for a full range.
4821    APInt One(BitWidth,1);
4822    APInt A     = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
4823    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
4824
4825    // The exit value should be (End+A)/A.
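    //
    // For example, with Range = [0,10) and A = 3, End = 9 and the exit value
    // is (9+3)/3 = 4: iteration 4 produces 12, the first value outside the
    // range, while iteration 3 produces 9, which is still inside.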
4826    APInt ExitVal = (End + A).udiv(A);
4827    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
4828
4829    // Evaluate at the exit value.  If we really did fall out of the valid
4830    // range, then we computed our trip count, otherwise wrap around or other
4831    // things must have happened.
4832    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
4833    if (Range.contains(Val->getValue()))
4834      return SE.getCouldNotCompute();  // Something strange happened
4835
4836    // Ensure that the previous value is in the range.  This is a sanity check.
4837    assert(Range.contains(
4838           EvaluateConstantChrecAtConstant(this,
4839           ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
4840           "Linear scev computation is off in a bad way!");
4841    return SE.getConstant(ExitValue);
4842  } else if (isQuadratic()) {
4843    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
4844    // quadratic equation to solve it.  To do this, we must frame our problem in
4845    // terms of figuring out when zero is crossed, instead of when
4846    // Range.getUpper() is crossed.
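    // Since the start was already shifted to zero above, replacing the start
    // with -Range.getUpper() yields a chrec that equals zero exactly when the
    // original chrec equals Range.getUpper().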
4847    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
4848    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
4849    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
4850
4851    // Next, solve the constructed addrec
4852    std::pair<const SCEV *,const SCEV *> Roots =
4853      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
4854    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4855    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4856    if (R1) {
4857      // Pick the smallest positive root value.
4858      if (ConstantInt *CB =
4859          dyn_cast<ConstantInt>(
4860                       SE.getContext().getConstantExprICmp(ICmpInst::ICMP_ULT,
4861                         R1->getValue(), R2->getValue()))) {
4862        if (!CB->getZExtValue())
4863          std::swap(R1, R2);   // R1 is the minimum root now.
4864
4865        // Make sure the root is not off by one.  The returned iteration should
4866        // not be in the range, but the previous one should be.  When solving
4867        // for "X*X < 5", for example, we should not return a root of 2.
4868        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
4869                                                             R1->getValue(),
4870                                                             SE);
4871        if (Range.contains(R1Val->getValue())) {
4872          // The next iteration must be out of the range...
4873          ConstantInt *NextVal =
4874                ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
4875
4876          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4877          if (!Range.contains(R1Val->getValue()))
4878            return SE.getConstant(NextVal);
4879          return SE.getCouldNotCompute();  // Something strange happened
4880        }
4881
4882        // If R1 was not in the range, then it is a good return value.  Make
4883        // sure that R1-1 WAS in the range though, just in case.
4884        ConstantInt *NextVal =
4885               ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
4886        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4887        if (Range.contains(R1Val->getValue()))
4888          return R1;
4889        return SE.getCouldNotCompute();  // Something strange happened
4890      }
4891    }
4892  }
4893
4894  return SE.getCouldNotCompute();
4895}
4896
4897
4898
4899//===----------------------------------------------------------------------===//
4900//                   SCEVCallbackVH Class Implementation
4901//===----------------------------------------------------------------------===//
4902
4903void ScalarEvolution::SCEVCallbackVH::deleted() {
4904  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4905  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
4906    SE->ConstantEvolutionLoopExitValue.erase(PN);
4907  if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
4908    SE->ValuesAtScopes.erase(I);
4909  SE->Scalars.erase(getValPtr());
4910  // this now dangles!
4911}
4912
4913void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
4914  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4915
4916  // Forget all the expressions associated with users of the old value,
4917  // so that future queries will recompute the expressions using the new
4918  // value.
4919  SmallVector<User *, 16> Worklist;
4920  SmallPtrSet<User *, 8> Visited;
4921  Value *Old = getValPtr();
4922  bool DeleteOld = false;
4923  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
4924       UI != UE; ++UI)
4925    Worklist.push_back(*UI);
4926  while (!Worklist.empty()) {
4927    User *U = Worklist.pop_back_val();
4928    // Deleting the Old value will cause this to dangle. Postpone
4929    // that until everything else is done.
4930    if (U == Old) {
4931      DeleteOld = true;
4932      continue;
4933    }
4934    if (!Visited.insert(U))
4935      continue;
4936    if (PHINode *PN = dyn_cast<PHINode>(U))
4937      SE->ConstantEvolutionLoopExitValue.erase(PN);
4938    if (Instruction *I = dyn_cast<Instruction>(U))
4939      SE->ValuesAtScopes.erase(I);
4940    SE->Scalars.erase(U);
4941    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
4942         UI != UE; ++UI)
4943      Worklist.push_back(*UI);
4944  }
4945  // Delete the Old value if it (indirectly) references itself.
4946  if (DeleteOld) {
4947    if (PHINode *PN = dyn_cast<PHINode>(Old))
4948      SE->ConstantEvolutionLoopExitValue.erase(PN);
4949    if (Instruction *I = dyn_cast<Instruction>(Old))
4950      SE->ValuesAtScopes.erase(I);
4951    SE->Scalars.erase(Old);
4952    // this now dangles!
4953  }
4954  // this may dangle!
4955}
4956
4957ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
4958  : CallbackVH(V), SE(se) {}
4959
4960//===----------------------------------------------------------------------===//
4961//                   ScalarEvolution Class Implementation
4962//===----------------------------------------------------------------------===//
4963
4964ScalarEvolution::ScalarEvolution()
4965  : FunctionPass(&ID) {
4966}
4967
4968bool ScalarEvolution::runOnFunction(Function &F) {
4969  this->F = &F;
4970  LI = &getAnalysis<LoopInfo>();
4971  TD = getAnalysisIfAvailable<TargetData>();
4972  return false;
4973}
4974
4975void ScalarEvolution::releaseMemory() {
4976  Scalars.clear();
4977  BackedgeTakenCounts.clear();
4978  ConstantEvolutionLoopExitValue.clear();
4979  ValuesAtScopes.clear();
4980  UniqueSCEVs.clear();
4981  SCEVAllocator.Reset();
4982}
4983
4984void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
4985  AU.setPreservesAll();
4986  AU.addRequiredTransitive<LoopInfo>();
4987}
4988
4989bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
4990  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
4991}
4992
4993static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
4994                          const Loop *L) {
4995  // Print all inner loops first
4996  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4997    PrintLoopInfo(OS, SE, *I);
4998
4999  OS << "Loop " << L->getHeader()->getName() << ": ";
5000
5001  SmallVector<BasicBlock*, 8> ExitBlocks;
5002  L->getExitBlocks(ExitBlocks);
5003  if (ExitBlocks.size() != 1)
5004    OS << "<multiple exits> ";
5005
5006  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
5007    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
5008  } else {
5009    OS << "Unpredictable backedge-taken count. ";
5010  }
5011
5012  OS << "\n";
5013  OS << "Loop " << L->getHeader()->getName() << ": ";
5014
5015  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
5016    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
5017  } else {
5018    OS << "Unpredictable max backedge-taken count. ";
5019  }
5020
5021  OS << "\n";
5022}
5023
5024void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
5025  // ScalarEvolution's implementation of the print method is to print
5026  // out SCEV values of all instructions that are interesting. Doing
5027  // this potentially causes it to create new SCEV objects though,
5028  // which technically conflicts with the const qualifier. This isn't
5029  // observable from outside the class though, so casting away the
5030  // const isn't dangerous.
5031  ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);
5032
5033  OS << "Classifying expressions for: " << F->getName() << "\n";
5034  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
5035    if (isSCEVable(I->getType())) {
5036      OS << *I << '\n';
5037      OS << "  -->  ";
5038      const SCEV *SV = SE.getSCEV(&*I);
5039      SV->print(OS);
5040
5041      const Loop *L = LI->getLoopFor((*I).getParent());
5042
5043      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
5044      if (AtUse != SV) {
5045        OS << "  -->  ";
5046        AtUse->print(OS);
5047      }
5048
5049      if (L) {
5050        OS << "\t\t" "Exits: ";
5051        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
5052        if (!ExitValue->isLoopInvariant(L)) {
5053          OS << "<<Unknown>>";
5054        } else {
5055          OS << *ExitValue;
5056        }
5057      }
5058
5059      OS << "\n";
5060    }
5061
5062  OS << "Determining loop execution counts for: " << F->getName() << "\n";
5063  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
5064    PrintLoopInfo(OS, &SE, *I);
5065}
5066
5067void ScalarEvolution::print(std::ostream &o, const Module *M) const {
5068  raw_os_ostream OS(o);
5069  print(OS, M);
5070}
5071