ScalarEvolution.cpp revision f5074ec9634d51472bc6e2114deea0afb6677dd8
//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These objects are uniqued and handed out as const SCEV *
// pointers.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
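//
// For example, given a loop like "for (i = 0; i != n; ++i) A[3*i+7] = 0",
// this analysis represents the induction variable i as the recurrence
// {0,+,1}<loop> (start 0, stride 1), and the index expression 3*i+7 as
// {7,+,3}<loop>.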
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeID(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  LLVM_UNREACHABLE("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  LLVM_UNREACHABLE("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  LLVM_UNREACHABLE("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const SCEV *
SCEVCouldNotCompute::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
  return this;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
  new (S) SCEVConstant(ID, V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

const SCEV *
SCEVCommutativeExpr::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      if (isa<SCEVAddExpr>(this))
        return SE.getAddExpr(NewOps);
      else if (isa<SCEVMulExpr>(this))
        return SE.getMulExpr(NewOps);
      else if (isa<SCEVSMaxExpr>(this))
        return SE.getSMaxExpr(NewOps);
      else if (isa<SCEVUMaxExpr>(this))
        return SE.getUMaxExpr(NewOps);
      else
        LLVM_UNREACHABLE("Unknown commutative expr!");
    }
  }
  return this;
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

const SCEV *
SCEVAddRecExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                  const SCEV *Conc,
                                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      return SE.getAddRecExpr(NewOps, L);
    }
  }
  return this;
}


bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L->getHeader()))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() + ">";
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
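  /// For example, constants sort before unknowns (scConstant precedes
  /// scUnknown in the SCEVTypes ordering), so x + 2 and 2 + x both
  /// canonicalize to (2 + x).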
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      LLVM_UNREACHABLE("Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
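///
/// For example, given the operands (x, 2, y, x), this produces an ordering
/// like (2, x, x, y): the constant sorts first, and the duplicate x's become
/// adjacent so that the folders can combine them.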
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                      ScalarEvolution &SE,
                                      const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
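  //
  // As a worked example, take K = 3 and W = 32: then K! = 6, T = 1 (one
  // factor of two in 6), and K!/2^T = 3, which is odd and therefore has a
  // multiplicative inverse mod 2^32.  The product It*(It-1)*(It-2) is formed
  // at W+T = 33 bits, divided by 2^T = 2, truncated back to 32 bits, and
  // multiplied by the inverse of 3 mod 2^32.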

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
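///
/// For example, {1,+,2,+,2} evaluates at iteration It to
/// 1 + 2*BC(It, 1) + 2*BC(It, 2) = 1 + 2*It + It*(It-1) = It*It + It + 1.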
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                               ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
  new (S) SCEVTruncateExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
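  // For instance, with X of type i8 and a trip count of at most 100, the
  // final value 0 + 1*100 still fits in 8 unsigned bits, so zext({0,+,1})
  // can be rewritten as the recurrence {0,+,1} in the wider type.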
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
  new (S) SCEVZeroExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
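  // For instance, X above stays within [0, 100], which fits in 8 signed
  // bits, so sext({0,+,1}) can likewise be rewritten as {0,+,1} in the
  // wider type.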
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                      getSignedRange(Step).getSignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
  new (S) SCEVSignExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
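/// The extended bits have no defined value; the result is simply whichever
/// of the zext and sext forms happens to fold better for the given operand.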
1005///
1006const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1007                                             const Type *Ty) {
1008  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1009         "This is not an extending conversion!");
1010  assert(isSCEVable(Ty) &&
1011         "This is not a conversion to a SCEVable type!");
1012  Ty = getEffectiveSCEVType(Ty);
1013
1014  // Sign-extend negative constants.
1015  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1016    if (SC->getValue()->getValue().isNegative())
1017      return getSignExtendExpr(Op, Ty);
1018
1019  // Peel off a truncate cast.
1020  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
1021    const SCEV *NewOp = T->getOperand();
1022    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1023      return getAnyExtendExpr(NewOp, Ty);
1024    return getTruncateOrNoop(NewOp, Ty);
1025  }
1026
1027  // Next try a zext cast. If the cast is folded, use it.
1028  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
1029  if (!isa<SCEVZeroExtendExpr>(ZExt))
1030    return ZExt;
1031
1032  // Next try a sext cast. If the cast is folded, use it.
1033  const SCEV *SExt = getSignExtendExpr(Op, Ty);
1034  if (!isa<SCEVSignExtendExpr>(SExt))
1035    return SExt;
1036
1037  // If the expression is obviously signed, use the sext cast value.
1038  if (isa<SCEVSMaxExpr>(Op))
1039    return SExt;
1040
1041  // Absent any other information, use the zext cast value.
1042  return ZExt;
1043}
1044
1045/// CollectAddOperandsWithScales - Process the given Ops list, which is
1046/// a list of operands to be added under the given scale, update the given
1047/// map. This is a helper function for getAddRecExpr. As an example of
1048/// what it does, given a sequence of operands that would form an add
1049/// expression like this:
1050///
1051///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
1052///
1053/// where A and B are constants, update the map with these values:
1054///
1055///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1056///
1057/// and add 13 + A*B*29 to AccumulatedConstant.
1058/// This will allow getAddRecExpr to produce this:
1059///
1060///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1061///
1062/// This form often exposes folding opportunities that are hidden in
1063/// the original operand list.
1064///
1065/// Return true iff it appears that any interesting folding opportunities
1066/// may be exposed. This helps getAddRecExpr short-circuit extra work in
1067/// the common case where no interesting opportunities are present, and
1068/// is also used as a check to avoid infinite recursion.
1069///
1070static bool
1071CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1072                             SmallVector<const SCEV *, 8> &NewOps,
1073                             APInt &AccumulatedConstant,
1074                             const SmallVectorImpl<const SCEV *> &Ops,
1075                             const APInt &Scale,
1076                             ScalarEvolution &SE) {
1077  bool Interesting = false;
1078
1079  // Iterate over the add operands.
1080  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1081    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1082    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1083      APInt NewScale =
1084        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
1085      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1086        // A multiplication of a constant with another add; recurse.
1087        Interesting |=
1088          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1089                                       cast<SCEVAddExpr>(Mul->getOperand(1))
1090                                         ->getOperands(),
1091                                       NewScale, SE);
1092      } else {
1093        // A multiplication of a constant with some other value. Update
1094        // the map.
1095        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1096        const SCEV *Key = SE.getMulExpr(MulOps);
1097        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1098          M.insert(std::make_pair(Key, NewScale));
1099        if (Pair.second) {
1100          NewOps.push_back(Pair.first->first);
1101        } else {
1102          Pair.first->second += NewScale;
1103          // The map already had an entry for this value, which may indicate
1104          // a folding opportunity.
1105          Interesting = true;
1106        }
1107      }
1108    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1109      // Pull a buried constant out to the outside.
1110      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
1111        Interesting = true;
1112      AccumulatedConstant += Scale * C->getValue()->getValue();
1113    } else {
1114      // An ordinary operand. Update the map.
1115      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1116        M.insert(std::make_pair(Ops[i], Scale));
1117      if (Pair.second) {
1118        NewOps.push_back(Pair.first->first);
1119      } else {
1120        Pair.first->second += Scale;
1121        // The map already had an entry for this value, which may indicate
1122        // a folding opportunity.
1123        Interesting = true;
1124      }
1125    }
1126  }
1127
1128  return Interesting;
1129}
1130
1131namespace {
1132  struct APIntCompare {
1133    bool operator()(const APInt &LHS, const APInt &RHS) const {
1134      return LHS.ult(RHS);
1135    }
1136  };
1137}
1138
1139/// getAddExpr - Get a canonical add expression, or something simpler if
1140/// possible.
1141const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) {
1142  assert(!Ops.empty() && "Cannot get empty add!");
1143  if (Ops.size() == 1) return Ops[0];
1144#ifndef NDEBUG
1145  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1146    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1147           getEffectiveSCEVType(Ops[0]->getType()) &&
1148           "SCEVAddExpr operand types don't match!");
1149#endif
1150
1151  // Sort by complexity, this groups all similar expression types together.
1152  GroupByComplexity(Ops, LI);
1153
1154  // If there are any constants, fold them together.
1155  unsigned Idx = 0;
1156  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1157    ++Idx;
1158    assert(Idx < Ops.size());
1159    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1160      // We found two constants, fold them together!
1161      Ops[0] = getConstant(LHSC->getValue()->getValue() +
1162                           RHSC->getValue()->getValue());
1163      if (Ops.size() == 2) return Ops[0];
1164      Ops.erase(Ops.begin()+1);  // Erase the folded element
1165      LHSC = cast<SCEVConstant>(Ops[0]);
1166    }
1167
1168    // If we are left with a constant zero being added, strip it off.
1169    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1170      Ops.erase(Ops.begin());
1171      --Idx;
1172    }
1173  }
1174
1175  if (Ops.size() == 1) return Ops[0];
1176
1177  // Okay, check to see if the same value occurs in the operand list twice.  If
1178  // so, merge them together into an multiply expression.  Since we sorted the
1179  // list, these values are required to be adjacent.
1180  const Type *Ty = Ops[0]->getType();
1181  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1182    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
1183      // Found a match, merge the two values into a multiply, and add any
1184      // remaining values to the result.
1185      const SCEV *Two = getIntegerSCEV(2, Ty);
1186      const SCEV *Mul = getMulExpr(Ops[i], Two);
1187      if (Ops.size() == 2)
1188        return Mul;
1189      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
1190      Ops.push_back(Mul);
1191      return getAddExpr(Ops);
1192    }
1193
1194  // Check for truncates. If all the operands are truncated from the same
1195  // type, see if factoring out the truncate would permit the result to be
1196  // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1197  // if the contents of the resulting outer trunc fold to something simple.
1198  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
1199    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
1200    const Type *DstType = Trunc->getType();
1201    const Type *SrcType = Trunc->getOperand()->getType();
1202    SmallVector<const SCEV *, 8> LargeOps;
1203    bool Ok = true;
1204    // Check all the operands to see if they can be represented in the
1205    // source type of the truncate.
1206    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1207      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
1208        if (T->getOperand()->getType() != SrcType) {
1209          Ok = false;
1210          break;
1211        }
1212        LargeOps.push_back(T->getOperand());
1213      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1214        // This could be either sign or zero extension, but sign extension
1215        // is much more likely to be foldable here.
1216        LargeOps.push_back(getSignExtendExpr(C, SrcType));
1217      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
1218        SmallVector<const SCEV *, 8> LargeMulOps;
1219        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
1220          if (const SCEVTruncateExpr *T =
1221                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
1222            if (T->getOperand()->getType() != SrcType) {
1223              Ok = false;
1224              break;
1225            }
1226            LargeMulOps.push_back(T->getOperand());
1227          } else if (const SCEVConstant *C =
1228                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
1229            // This could be either sign or zero extension, but sign extension
1230            // is much more likely to be foldable here.
1231            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
1232          } else {
1233            Ok = false;
1234            break;
1235          }
1236        }
1237        if (Ok)
1238          LargeOps.push_back(getMulExpr(LargeMulOps));
1239      } else {
1240        Ok = false;
1241        break;
1242      }
1243    }
1244    if (Ok) {
1245      // Evaluate the expression in the larger type.
1246      const SCEV *Fold = getAddExpr(LargeOps);
1247      // If it folds to something simple, use it. Otherwise, don't.
1248      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
1249        return getTruncateExpr(Fold, DstType);
1250    }
1251  }
1252
1253  // Skip past any other cast SCEVs.
1254  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
1255    ++Idx;
1256
1257  // If there are add operands they would be next.
1258  if (Idx < Ops.size()) {
1259    bool DeletedAdd = false;
1260    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
1261      // If we have an add, expand the add operands onto the end of the operands
1262      // list.
1263      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
1264      Ops.erase(Ops.begin()+Idx);
1265      DeletedAdd = true;
1266    }
1267
1268    // If we deleted at least one add, we added operands to the end of the list,
1269    // and they are not necessarily sorted.  Recurse to resort and resimplify
1270    // any operands we just aquired.
1271    if (DeletedAdd)
1272      return getAddExpr(Ops);
1273  }
1274
1275  // Skip over the add expression until we get to a multiply.
1276  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1277    ++Idx;
1278
1279  // Check to see if there are any folding opportunities present with
1280  // operands multiplied by constant values.
1281  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1282    uint64_t BitWidth = getTypeSizeInBits(Ty);
1283    DenseMap<const SCEV *, APInt> M;
1284    SmallVector<const SCEV *, 8> NewOps;
1285    APInt AccumulatedConstant(BitWidth, 0);
1286    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1287                                     Ops, APInt(BitWidth, 1), *this)) {
1288      // Some interesting folding opportunity is present, so it's worthwhile to
1289      // re-generate the operands list. Group the operands by constant scale,
1290      // to avoid multiplying by the same constant scale multiple times.
1291      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1292      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
1293           E = NewOps.end(); I != E; ++I)
1294        MulOpLists[M.find(*I)->second].push_back(*I);
1295      // Re-generate the operands list.
1296      Ops.clear();
1297      if (AccumulatedConstant != 0)
1298        Ops.push_back(getConstant(AccumulatedConstant));
1299      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1300           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1301        if (I->first != 0)
1302          Ops.push_back(getMulExpr(getConstant(I->first),
1303                                   getAddExpr(I->second)));
1304      if (Ops.empty())
1305        return getIntegerSCEV(0, Ty);
1306      if (Ops.size() == 1)
1307        return Ops[0];
1308      return getAddExpr(Ops);
1309    }
1310  }
1311
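  // Worked example of the rescaling above (an illustrative sketch, not
  // part of the original source): for Ops = { 4*x, 2*x, 4*y, 4*z },
  // CollectAddOperandsWithScales accumulates one scale per unique operand
  // (x -> 6, y -> 4, z -> 4), and the list is regenerated with operands
  // grouped by scale:
  //
  //   (4*x) + (2*x) + (4*y) + (4*z)  -->  (6*x) + (4*(y + z))
  //
  // so each distinct constant scale appears in exactly one multiply.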
1312  // If we are adding something to a multiply expression, make sure the
1313  // something is not already an operand of the multiply.  If so, merge it into
1314  // the multiply.
1315  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1316    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1317    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1318      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1319      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1320        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
1321          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
1322          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1323          if (Mul->getNumOperands() != 2) {
1324            // If the multiply has more than two operands, we must get the
1325            // Y*Z term.
1326            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
1327            MulOps.erase(MulOps.begin()+MulOp);
1328            InnerMul = getMulExpr(MulOps);
1329          }
1330          const SCEV *One = getIntegerSCEV(1, Ty);
1331          const SCEV *AddOne = getAddExpr(InnerMul, One);
1332          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
1333          if (Ops.size() == 2) return OuterMul;
1334          if (AddOp < Idx) {
1335            Ops.erase(Ops.begin()+AddOp);
1336            Ops.erase(Ops.begin()+Idx-1);
1337          } else {
1338            Ops.erase(Ops.begin()+Idx);
1339            Ops.erase(Ops.begin()+AddOp-1);
1340          }
1341          Ops.push_back(OuterMul);
1342          return getAddExpr(Ops);
1343        }
1344
1345      // Check this multiply against other multiplies being added together.
1346      for (unsigned OtherMulIdx = Idx+1;
1347           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1348           ++OtherMulIdx) {
1349        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1350        // If MulOp occurs in OtherMul, we can fold the two multiplies
1351        // together.
1352        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1353             OMulOp != e; ++OMulOp)
1354          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1355            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1356            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1357            if (Mul->getNumOperands() != 2) {
1358              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1359                                                  Mul->op_end());
1360              MulOps.erase(MulOps.begin()+MulOp);
1361              InnerMul1 = getMulExpr(MulOps);
1362            }
1363            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1364            if (OtherMul->getNumOperands() != 2) {
1365              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1366                                                  OtherMul->op_end());
1367              MulOps.erase(MulOps.begin()+OMulOp);
1368              InnerMul2 = getMulExpr(MulOps);
1369            }
1370            const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1371            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1372            if (Ops.size() == 2) return OuterMul;
1373            Ops.erase(Ops.begin()+Idx);
1374            Ops.erase(Ops.begin()+OtherMulIdx-1);
1375            Ops.push_back(OuterMul);
1376            return getAddExpr(Ops);
1377          }
1378      }
1379    }
1380  }
1381
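  // Worked examples of the two multiply merges above (illustrative, not
  // part of the original source):
  //
  //   w + x + (x*y*z)      -->  w + (x * ((y*z) + 1))
  //   x + (a*b*c) + (a*d)  -->  x + (a * ((b*c) + d))
  //
  // Note that Mul->getOperand(MulOp == 0) above is an idiom for "the other
  // operand" of a two-operand multiply: it yields operand 1 when MulOp is
  // 0, and operand 0 otherwise.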
1382  // If there are any add recurrences in the operands list, see if any other
1383  // added values are loop invariant.  If so, we can fold them into the
1384  // recurrence.
1385  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1386    ++Idx;
1387
1388  // Scan over all recurrences, trying to fold loop invariants into them.
1389  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1390    // Scan all of the other operands to this add and add them to the vector if
1391    // they are loop invariant w.r.t. the recurrence.
1392    SmallVector<const SCEV *, 8> LIOps;
1393    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1394    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1395      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1396        LIOps.push_back(Ops[i]);
1397        Ops.erase(Ops.begin()+i);
1398        --i; --e;
1399      }
1400
1401    // If we found some loop invariants, fold them into the recurrence.
1402    if (!LIOps.empty()) {
1403      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
1404      LIOps.push_back(AddRec->getStart());
1405
1406      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1407                                           AddRec->op_end());
1408      AddRecOps[0] = getAddExpr(LIOps);
1409
1410      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
1411      // If all of the other operands were loop invariant, we are done.
1412      if (Ops.size() == 1) return NewRec;
1413
1414      // Otherwise, add the folded AddRec to the non-loop-invariant parts.
1415      for (unsigned i = 0;; ++i)
1416        if (Ops[i] == AddRec) {
1417          Ops[i] = NewRec;
1418          break;
1419        }
1420      return getAddExpr(Ops);
1421    }
1422
1423    // Okay, if there weren't any loop invariants to be folded, check to see if
1424    // there are multiple AddRecs with the same loop induction variable being
1425    // added together.  If so, we can fold them.
1426    for (unsigned OtherIdx = Idx+1;
1427         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1428      if (OtherIdx != Idx) {
1429        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1430        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1431          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
1432          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
1433                                              AddRec->op_end());
1434          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
1435            if (i >= NewOps.size()) {
1436              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
1437                            OtherAddRec->op_end());
1438              break;
1439            }
1440            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
1441          }
1442          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
1443
1444          if (Ops.size() == 2) return NewAddRec;
1445
1446          Ops.erase(Ops.begin()+Idx);
1447          Ops.erase(Ops.begin()+OtherIdx-1);
1448          Ops.push_back(NewAddRec);
1449          return getAddExpr(Ops);
1450        }
1451      }
1452
1453    // Otherwise couldn't fold anything into this recurrence.  Move onto the
1454    // next one.
1455  }
1456
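  // Worked examples of the recurrence folds above (illustrative, not part
  // of the original source), writing {Start,+,Step}<L> for an addrec on
  // loop L:
  //
  //   5 + {1,+,2}<L>           -->  {6,+,2}<L>    (loop-invariant folded in)
  //   {1,+,2}<L> + {3,+,4}<L>  -->  {4,+,6}<L>    (same-loop addrecs merged)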
1457  // Okay, it looks like we really DO need an add expr.  Check to see if we
1458  // already have one, otherwise create a new one.
1459  FoldingSetNodeID ID;
1460  ID.AddInteger(scAddExpr);
1461  ID.AddInteger(Ops.size());
1462  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1463    ID.AddPointer(Ops[i]);
1464  void *IP = 0;
1465  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1466  SCEV *S = SCEVAllocator.Allocate<SCEVAddExpr>();
1467  new (S) SCEVAddExpr(ID, Ops);
1468  UniqueSCEVs.InsertNode(S, IP);
1469  return S;
1470}
1471
1472
1473/// getMulExpr - Get a canonical multiply expression, or something simpler if
1474/// possible.
1475const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) {
1476  assert(!Ops.empty() && "Cannot get empty mul!");
1477#ifndef NDEBUG
1478  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1479    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1480           getEffectiveSCEVType(Ops[0]->getType()) &&
1481           "SCEVMulExpr operand types don't match!");
1482#endif
1483
1484  // Sort by complexity, this groups all similar expression types together.
1485  GroupByComplexity(Ops, LI);
1486
1487  // If there are any constants, fold them together.
1488  unsigned Idx = 0;
1489  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1490
1491    // C1*(C2+V) -> C1*C2 + C1*V
1492    if (Ops.size() == 2)
1493      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1494        if (Add->getNumOperands() == 2 &&
1495            isa<SCEVConstant>(Add->getOperand(0)))
1496          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1497                            getMulExpr(LHSC, Add->getOperand(1)));
1498
1499
1500    ++Idx;
1501    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1502      // We found two constants, fold them together!
1503      ConstantInt *Fold = ConstantInt::get(LHSC->getValue()->getValue() *
1504                                           RHSC->getValue()->getValue());
1505      Ops[0] = getConstant(Fold);
1506      Ops.erase(Ops.begin()+1);  // Erase the folded element
1507      if (Ops.size() == 1) return Ops[0];
1508      LHSC = cast<SCEVConstant>(Ops[0]);
1509    }
1510
1511    // If we are left with a constant one being multiplied, strip it off.
1512    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1513      Ops.erase(Ops.begin());
1514      --Idx;
1515    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1516      // If we have a multiply of zero, it will always be zero.
1517      return Ops[0];
1518    }
1519  }
1520
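  // Worked examples of the constant handling above (illustrative, not
  // part of the original source):
  //
  //   3 * (2 + x)  -->  6 + (3 * x)   (constant distributed over the add)
  //   2 * 3 * x    -->  6 * x         (adjacent constants folded)
  //   1 * x        -->  x             (unit factor stripped)
  //   0 * x        -->  0             (multiplication by zero)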
1521  // Skip over the add expression until we get to a multiply.
1522  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1523    ++Idx;
1524
1525  if (Ops.size() == 1)
1526    return Ops[0];
1527
1528  // If there are mul operands inline them all into this expression.
1529  if (Idx < Ops.size()) {
1530    bool DeletedMul = false;
1531    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1532      // If we have a mul, expand the mul operands onto the end of the operands
1533      // list.
1534      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
1535      Ops.erase(Ops.begin()+Idx);
1536      DeletedMul = true;
1537    }
1538
1539    // If we deleted at least one mul, we added operands to the end of the list,
1540    // and they are not necessarily sorted.  Recurse to resort and resimplify
1541      // any operands we just acquired.
1542    if (DeletedMul)
1543      return getMulExpr(Ops);
1544  }
1545
1546  // If there are any add recurrences in the operands list, see if any other
1547  // added values are loop invariant.  If so, we can fold them into the
1548  // recurrence.
1549  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1550    ++Idx;
1551
1552  // Scan over all recurrences, trying to fold loop invariants into them.
1553  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1554    // Scan all of the other operands to this mul and add them to the vector if
1555    // they are loop invariant w.r.t. the recurrence.
1556    SmallVector<const SCEV *, 8> LIOps;
1557    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1558    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1559      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1560        LIOps.push_back(Ops[i]);
1561        Ops.erase(Ops.begin()+i);
1562        --i; --e;
1563      }
1564
1565    // If we found some loop invariants, fold them into the recurrence.
1566    if (!LIOps.empty()) {
1567      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
1568      SmallVector<const SCEV *, 4> NewOps;
1569      NewOps.reserve(AddRec->getNumOperands());
1570      if (LIOps.size() == 1) {
1571        const SCEV *Scale = LIOps[0];
1572        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1573          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1574      } else {
1575        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
1576          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
1577          MulOps.push_back(AddRec->getOperand(i));
1578          NewOps.push_back(getMulExpr(MulOps));
1579        }
1580      }
1581
1582      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
1583
1584      // If all of the other operands were loop invariant, we are done.
1585      if (Ops.size() == 1) return NewRec;
1586
1587      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
1588      for (unsigned i = 0;; ++i)
1589        if (Ops[i] == AddRec) {
1590          Ops[i] = NewRec;
1591          break;
1592        }
1593      return getMulExpr(Ops);
1594    }
1595
1596    // Okay, if there weren't any loop invariants to be folded, check to see if
1597    // there are multiple AddRecs with the same loop induction variable being
1598    // multiplied together.  If so, we can fold them.
1599    for (unsigned OtherIdx = Idx+1;
1600         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1601      if (OtherIdx != Idx) {
1602        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1603        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1604          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
1605          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1606          const SCEV *NewStart = getMulExpr(F->getStart(),
1607                                            G->getStart());
1608          const SCEV *B = F->getStepRecurrence(*this);
1609          const SCEV *D = G->getStepRecurrence(*this);
1610          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
1611                                           getMulExpr(G, B),
1612                                           getMulExpr(B, D));
1613          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
1614                                                F->getLoop());
1615          if (Ops.size() == 2) return NewAddRec;
1616
1617          Ops.erase(Ops.begin()+Idx);
1618          Ops.erase(Ops.begin()+OtherIdx-1);
1619          Ops.push_back(NewAddRec);
1620          return getMulExpr(Ops);
1621        }
1622      }
1623
1624    // Otherwise couldn't fold anything into this recurrence.  Move onto the
1625    // next one.
1626  }
1627
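  // Sanity check of the product formula above (illustrative, not part of
  // the original source): with F = {1,+,2} and G = {3,+,4}, the product at
  // iteration n is (1 + 2n)*(3 + 4n) = 3 + 10n + 8n^2, whose difference
  // between consecutive iterations is 18 + 16n.  The formula gives
  // NewStart = 1*3 = 3 and NewStep = F*4 + G*2 + 2*4
  // = (4 + 8n) + (6 + 8n) + 8 = 18 + 16n, which matches.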
1628  // Okay, it looks like we really DO need a mul expr.  Check to see if we
1629  // already have one, otherwise create a new one.
1630  FoldingSetNodeID ID;
1631  ID.AddInteger(scMulExpr);
1632  ID.AddInteger(Ops.size());
1633  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1634    ID.AddPointer(Ops[i]);
1635  void *IP = 0;
1636  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1637  SCEV *S = SCEVAllocator.Allocate<SCEVMulExpr>();
1638  new (S) SCEVMulExpr(ID, Ops);
1639  UniqueSCEVs.InsertNode(S, IP);
1640  return S;
1641}
1642
1643/// getUDivExpr - Get a canonical unsigned division expression, or something
1644/// simpler if possible.
1645const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1646                                         const SCEV *RHS) {
1647  assert(getEffectiveSCEVType(LHS->getType()) ==
1648         getEffectiveSCEVType(RHS->getType()) &&
1649         "SCEVUDivExpr operand types don't match!");
1650
1651  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1652    if (RHSC->getValue()->equalsInt(1))
1653      return LHS;                            // X udiv 1 --> X
1654    if (RHSC->isZero())
1655      return getIntegerSCEV(0, LHS->getType()); // value is undefined
1656
1657    // Determine if the division can be folded into the operands of
1658    // the left-hand side.
1659    // TODO: Generalize this to non-constants by using known-bits information.
1660    const Type *Ty = LHS->getType();
1661    unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1662    unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1663    // For non-power-of-two values, effectively round the value up to the
1664    // nearest power of two.
1665    if (!RHSC->getValue()->getValue().isPowerOf2())
1666      ++MaxShiftAmt;
1667    const IntegerType *ExtTy =
1668      IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
1669    // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
1670    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1671      if (const SCEVConstant *Step =
1672            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1673        if (!Step->getValue()->getValue()
1674              .urem(RHSC->getValue()->getValue()) &&
1675            getZeroExtendExpr(AR, ExtTy) ==
1676            getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1677                          getZeroExtendExpr(Step, ExtTy),
1678                          AR->getLoop())) {
1679          SmallVector<const SCEV *, 4> Operands;
1680          for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1681            Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1682          return getAddRecExpr(Operands, AR->getLoop());
1683        }
1684    // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1685    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1686      SmallVector<const SCEV *, 4> Operands;
1687      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1688        Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1689      if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1690        // Find an operand that's safely divisible.
1691        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1692          const SCEV *Op = M->getOperand(i);
1693          const SCEV *Div = getUDivExpr(Op, RHSC);
1694          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1695            const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
1696            Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
1697                                                  MOperands.end());
1698            Operands[i] = Div;
1699            return getMulExpr(Operands);
1700          }
1701        }
1702    }
1703    // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1704    if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
1705      SmallVector<const SCEV *, 4> Operands;
1706      for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1707        Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1708      if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1709        Operands.clear();
1710        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1711          const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1712          if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1713            break;
1714          Operands.push_back(Op);
1715        }
1716        if (Operands.size() == A->getNumOperands())
1717          return getAddExpr(Operands);
1718      }
1719    }
1720
1721    // Fold if both operands are constant.
1722    if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1723      Constant *LHSCV = LHSC->getValue();
1724      Constant *RHSCV = RHSC->getValue();
1725      return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
1726                                                                 RHSCV)));
1727    }
1728  }
1729
1730  FoldingSetNodeID ID;
1731  ID.AddInteger(scUDivExpr);
1732  ID.AddPointer(LHS);
1733  ID.AddPointer(RHS);
1734  void *IP = 0;
1735  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1736  SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
1737  new (S) SCEVUDivExpr(ID, LHS, RHS);
1738  UniqueSCEVs.InsertNode(S, IP);
1739  return S;
1740}
1741
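// Worked examples of the udiv folds above (illustrative, not part of the
// original source), applicable only when the zero-extension checks prove
// the wide and narrow computations agree:
//
//   {8,+,4} udiv 2  -->  {4,+,2}   (step evenly divisible by the divisor)
//   (6 * x) udiv 2  -->  3 * x     (a multiply operand divides exactly)
//   x udiv 1        -->  x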
1742
1743/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1744/// Simplify the expression as much as possible.
1745const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
1746                                           const SCEV *Step, const Loop *L) {
1747  SmallVector<const SCEV *, 4> Operands;
1748  Operands.push_back(Start);
1749  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1750    if (StepChrec->getLoop() == L) {
1751      Operands.insert(Operands.end(), StepChrec->op_begin(),
1752                      StepChrec->op_end());
1753      return getAddRecExpr(Operands, L);
1754    }
1755
1756  Operands.push_back(Step);
1757  return getAddRecExpr(Operands, L);
1758}
1759
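// Worked example of the step splicing above (illustrative, not part of the
// original source): when Step is itself an addrec on the same loop, its
// operands are appended rather than nested, producing one flat chrec:
//
//   getAddRecExpr(X, {Y,+,Z}<L>, L)  -->  {X,+,Y,+,Z}<L>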
1760/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1761/// Simplify the expression as much as possible.
1762const SCEV *
1763ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
1764                               const Loop *L) {
1765  if (Operands.size() == 1) return Operands[0];
1766#ifndef NDEBUG
1767  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1768    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1769           getEffectiveSCEVType(Operands[0]->getType()) &&
1770           "SCEVAddRecExpr operand types don't match!");
1771#endif
1772
1773  if (Operands.back()->isZero()) {
1774    Operands.pop_back();
1775    return getAddRecExpr(Operands, L);             // {X,+,0}  -->  X
1776  }
1777
1778  // Canonicalize nested AddRecs by nesting them in order of loop depth.
1779  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
1780    const Loop* NestedLoop = NestedAR->getLoop();
1781    if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
1782      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
1783                                                NestedAR->op_end());
1784      Operands[0] = NestedAR->getStart();
1785      // AddRecs require their operands be loop-invariant with respect to their
1786      // loops. Don't perform this transformation if it would break this
1787      // requirement.
1788      bool AllInvariant = true;
1789      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1790        if (!Operands[i]->isLoopInvariant(L)) {
1791          AllInvariant = false;
1792          break;
1793        }
1794      if (AllInvariant) {
1795        NestedOperands[0] = getAddRecExpr(Operands, L);
1796        AllInvariant = true;
1797        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
1798          if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
1799            AllInvariant = false;
1800            break;
1801          }
1802        if (AllInvariant)
1803          // Ok, both add recurrences are valid after the transformation.
1804          return getAddRecExpr(NestedOperands, NestedLoop);
1805      }
1806      // Reset Operands to its original state.
1807      Operands[0] = NestedAR;
1808    }
1809  }
1810
1811  FoldingSetNodeID ID;
1812  ID.AddInteger(scAddRecExpr);
1813  ID.AddInteger(Operands.size());
1814  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1815    ID.AddPointer(Operands[i]);
1816  ID.AddPointer(L);
1817  void *IP = 0;
1818  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1819  SCEV *S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
1820  new (S) SCEVAddRecExpr(ID, Operands, L);
1821  UniqueSCEVs.InsertNode(S, IP);
1822  return S;
1823}
1824
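// Worked example of the nesting canonicalization above (illustrative, not
// part of the original source): with Inner nested inside Outer, an addrec
// on Outer whose start is an addrec on the deeper Inner loop is reordered
// so that the outer recurrence becomes the start of the inner one:
//
//   {{X,+,Y}<Inner>,+,Z}<Outer>  -->  {{X,+,Z}<Outer>,+,Y}<Inner>
//
// provided every operand remains invariant in the loop that requires it.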
1825const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
1826                                         const SCEV *RHS) {
1827  SmallVector<const SCEV *, 2> Ops;
1828  Ops.push_back(LHS);
1829  Ops.push_back(RHS);
1830  return getSMaxExpr(Ops);
1831}
1832
1833const SCEV *
1834ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1835  assert(!Ops.empty() && "Cannot get empty smax!");
1836  if (Ops.size() == 1) return Ops[0];
1837#ifndef NDEBUG
1838  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1839    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1840           getEffectiveSCEVType(Ops[0]->getType()) &&
1841           "SCEVSMaxExpr operand types don't match!");
1842#endif
1843
1844  // Sort by complexity, this groups all similar expression types together.
1845  GroupByComplexity(Ops, LI);
1846
1847  // If there are any constants, fold them together.
1848  unsigned Idx = 0;
1849  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1850    ++Idx;
1851    assert(Idx < Ops.size());
1852    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1853      // We found two constants, fold them together!
1854      ConstantInt *Fold = ConstantInt::get(
1855                              APIntOps::smax(LHSC->getValue()->getValue(),
1856                                             RHSC->getValue()->getValue()));
1857      Ops[0] = getConstant(Fold);
1858      Ops.erase(Ops.begin()+1);  // Erase the folded element
1859      if (Ops.size() == 1) return Ops[0];
1860      LHSC = cast<SCEVConstant>(Ops[0]);
1861    }
1862
1863    // If we are left with a constant minimum-int, strip it off.
1864    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
1865      Ops.erase(Ops.begin());
1866      --Idx;
1867    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
1868      // If we have an smax with a constant maximum-int, it will always be
1869      // maximum-int.
1870      return Ops[0];
1871    }
1872  }
1873
1874  if (Ops.size() == 1) return Ops[0];
1875
1876  // Find the first SMax.
1877  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
1878    ++Idx;
1879
1880  // Check to see if one of the operands is an SMax. If so, expand its operands
1881  // onto our operand list, and recurse to simplify.
1882  if (Idx < Ops.size()) {
1883    bool DeletedSMax = false;
1884    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
1885      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
1886      Ops.erase(Ops.begin()+Idx);
1887      DeletedSMax = true;
1888    }
1889
1890    if (DeletedSMax)
1891      return getSMaxExpr(Ops);
1892  }
1893
1894  // Okay, check to see if the same value occurs in the operand list twice.  If
1895  // so, delete one.  Since we sorted the list, these values are required to
1896  // be adjacent.
1897  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1898    if (Ops[i] == Ops[i+1]) {      //  X smax Y smax Y  -->  X smax Y
1899      Ops.erase(Ops.begin()+i);
1900      --i; --e;
1901    }
1902
1903  if (Ops.size() == 1) return Ops[0];
1904
1905  assert(!Ops.empty() && "Reduced smax down to nothing!");
1906
1907  // Okay, it looks like we really DO need an smax expr.  Check to see if we
1908  // already have one, otherwise create a new one.
1909  FoldingSetNodeID ID;
1910  ID.AddInteger(scSMaxExpr);
1911  ID.AddInteger(Ops.size());
1912  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1913    ID.AddPointer(Ops[i]);
1914  void *IP = 0;
1915  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1916  SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
1917  new (S) SCEVSMaxExpr(ID, Ops);
1918  UniqueSCEVs.InsertNode(S, IP);
1919  return S;
1920}
1921
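// Worked examples of the smax simplifications above (illustrative, not
// part of the original source):
//
//   smax(x, smax(y, x))  -->  smax(x, y)   (flattened, duplicate removed)
//   smax(INT_MIN, x)     -->  x            (constant minimum-int stripped)
//   smax(INT_MAX, x)     -->  INT_MAX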
1922const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
1923                                         const SCEV *RHS) {
1924  SmallVector<const SCEV *, 2> Ops;
1925  Ops.push_back(LHS);
1926  Ops.push_back(RHS);
1927  return getUMaxExpr(Ops);
1928}
1929
1930const SCEV *
1931ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1932  assert(!Ops.empty() && "Cannot get empty umax!");
1933  if (Ops.size() == 1) return Ops[0];
1934#ifndef NDEBUG
1935  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1936    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1937           getEffectiveSCEVType(Ops[0]->getType()) &&
1938           "SCEVUMaxExpr operand types don't match!");
1939#endif
1940
1941  // Sort by complexity, this groups all similar expression types together.
1942  GroupByComplexity(Ops, LI);
1943
1944  // If there are any constants, fold them together.
1945  unsigned Idx = 0;
1946  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1947    ++Idx;
1948    assert(Idx < Ops.size());
1949    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1950      // We found two constants, fold them together!
1951      ConstantInt *Fold = ConstantInt::get(
1952                              APIntOps::umax(LHSC->getValue()->getValue(),
1953                                             RHSC->getValue()->getValue()));
1954      Ops[0] = getConstant(Fold);
1955      Ops.erase(Ops.begin()+1);  // Erase the folded element
1956      if (Ops.size() == 1) return Ops[0];
1957      LHSC = cast<SCEVConstant>(Ops[0]);
1958    }
1959
1960    // If we are left with a constant minimum-int, strip it off.
1961    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
1962      Ops.erase(Ops.begin());
1963      --Idx;
1964    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
1965      // If we have an umax with a constant maximum-int, it will always be
1966      // maximum-int.
1967      return Ops[0];
1968    }
1969  }
1970
1971  if (Ops.size() == 1) return Ops[0];
1972
1973  // Find the first UMax
1974  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
1975    ++Idx;
1976
1977  // Check to see if one of the operands is a UMax. If so, expand its operands
1978  // onto our operand list, and recurse to simplify.
1979  if (Idx < Ops.size()) {
1980    bool DeletedUMax = false;
1981    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
1982      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
1983      Ops.erase(Ops.begin()+Idx);
1984      DeletedUMax = true;
1985    }
1986
1987    if (DeletedUMax)
1988      return getUMaxExpr(Ops);
1989  }
1990
1991  // Okay, check to see if the same value occurs in the operand list twice.  If
1992  // so, delete one.  Since we sorted the list, these values are required to
1993  // be adjacent.
1994  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1995    if (Ops[i] == Ops[i+1]) {      //  X umax Y umax Y  -->  X umax Y
1996      Ops.erase(Ops.begin()+i);
1997      --i; --e;
1998    }
1999
2000  if (Ops.size() == 1) return Ops[0];
2001
2002  assert(!Ops.empty() && "Reduced umax down to nothing!");
2003
2004  // Okay, it looks like we really DO need a umax expr.  Check to see if we
2005  // already have one, otherwise create a new one.
2006  FoldingSetNodeID ID;
2007  ID.AddInteger(scUMaxExpr);
2008  ID.AddInteger(Ops.size());
2009  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2010    ID.AddPointer(Ops[i]);
2011  void *IP = 0;
2012  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2013  SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
2014  new (S) SCEVUMaxExpr(ID, Ops);
2015  UniqueSCEVs.InsertNode(S, IP);
2016  return S;
2017}
2018
2019const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
2020                                         const SCEV *RHS) {
2021  // ~smax(~x, ~y) == smin(x, y).
2022  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2023}
2024
2025const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2026                                         const SCEV *RHS) {
2027  // ~umax(~x, ~y) == umin(x, y).
2028  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2029}
2030
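// Sanity check of the min-via-max identities above (illustrative, not part
// of the original source): in i8 with x = 3 and y = 5,
//
//   ~smax(~3, ~5) = ~smax(-4, -6) = ~(-4) = 3 = smin(3, 5)
//
// and the unsigned case follows the same pattern with umax and umin.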
2031const SCEV *ScalarEvolution::getUnknown(Value *V) {
2032  // Don't attempt to do anything other than create a SCEVUnknown object
2033  // here.  createSCEV only calls getUnknown after checking for all other
2034  // interesting possibilities, and any other code that calls getUnknown
2035  // is doing so in order to hide a value from SCEV canonicalization.
2036
2037  FoldingSetNodeID ID;
2038  ID.AddInteger(scUnknown);
2039  ID.AddPointer(V);
2040  void *IP = 0;
2041  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2042  SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
2043  new (S) SCEVUnknown(ID, V);
2044  UniqueSCEVs.InsertNode(S, IP);
2045  return S;
2046}
2047
2048//===----------------------------------------------------------------------===//
2049//            Basic SCEV Analysis and PHI Idiom Recognition Code
2050//
2051
2052/// isSCEVable - Test if values of the given type are analyzable within
2053/// the SCEV framework. This primarily includes integer types, and it
2054/// can optionally include pointer types if the ScalarEvolution class
2055/// has access to target-specific information.
2056bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2057  // Integers are always SCEVable.
2058  if (Ty->isInteger())
2059    return true;
2060
2061  // Pointers are SCEVable if TargetData information is available
2062  // to provide pointer size information.
2063  if (isa<PointerType>(Ty))
2064    return TD != NULL;
2065
2066  // Otherwise it's not SCEVable.
2067  return false;
2068}
2069
2070/// getTypeSizeInBits - Return the size in bits of the specified type,
2071/// for which isSCEVable must return true.
2072uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2073  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2074
2075  // If we have a TargetData, use it!
2076  if (TD)
2077    return TD->getTypeSizeInBits(Ty);
2078
2079  // Otherwise, we support only integer types.
2080  assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
2081  return Ty->getPrimitiveSizeInBits();
2082}
2083
2084/// getEffectiveSCEVType - Return a type with the same bitwidth as
2085/// the given type and which represents how SCEV will treat the given
2086/// type, for which isSCEVable must return true. For pointer types,
2087/// this is the pointer-sized integer type.
2088const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2089  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2090
2091  if (Ty->isInteger())
2092    return Ty;
2093
2094  assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
2095  return TD->getIntPtrType();
2096}
2097
2098const SCEV *ScalarEvolution::getCouldNotCompute() {
2099  return &CouldNotCompute;
2100}
2101
2102/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2103/// expression and create a new one.
2104const SCEV *ScalarEvolution::getSCEV(Value *V) {
2105  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2106
2107  std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
2108  if (I != Scalars.end()) return I->second;
2109  const SCEV *S = createSCEV(V);
2110  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2111  return S;
2112}
2113
2114/// getIntegerSCEV - Given a SCEVable type, create a constant for the
2115/// specified signed integer value and return a SCEV for the constant.
2116const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
2117  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
2118  return getConstant(ConstantInt::get(ITy, Val));
2119}
2120
2121/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2122///
2123const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2124  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2125    return getConstant(
2126               cast<ConstantInt>(Context->getConstantExprNeg(VC->getValue())));
2127
2128  const Type *Ty = V->getType();
2129  Ty = getEffectiveSCEVType(Ty);
2130  return getMulExpr(V,
2131                  getConstant(cast<ConstantInt>(Context->getAllOnesValue(Ty))));
2132}
2133
2134/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2135const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2136  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2137    return getConstant(
2138                cast<ConstantInt>(Context->getConstantExprNot(VC->getValue())));
2139
2140  const Type *Ty = V->getType();
2141  Ty = getEffectiveSCEVType(Ty);
2142  const SCEV *AllOnes =
2143                   getConstant(cast<ConstantInt>(Context->getAllOnesValue(Ty)));
2144  return getMinusSCEV(AllOnes, V);
2145}
2146
2147/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2148///
2149const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2150                                          const SCEV *RHS) {
2151  // X - Y --> X + -Y
2152  return getAddExpr(LHS, getNegativeSCEV(RHS));
2153}
2154
2155/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2156/// input value to the specified type.  If the type must be extended, it is zero
2157/// extended.
2158const SCEV *
2159ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2160                                         const Type *Ty) {
2161  const Type *SrcTy = V->getType();
2162  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2163         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2164         "Cannot truncate or zero extend with non-integer arguments!");
2165  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2166    return V;  // No conversion
2167  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2168    return getTruncateExpr(V, Ty);
2169  return getZeroExtendExpr(V, Ty);
2170}
2171
2172/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2173/// input value to the specified type.  If the type must be extended, it is sign
2174/// extended.
2175const SCEV *
2176ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2177                                         const Type *Ty) {
2178  const Type *SrcTy = V->getType();
2179  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2180         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2181         "Cannot truncate or zero extend with non-integer arguments!");
2182  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2183    return V;  // No conversion
2184  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2185    return getTruncateExpr(V, Ty);
2186  return getSignExtendExpr(V, Ty);
2187}
2188
2189/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2190/// input value to the specified type.  If the type must be extended, it is zero
2191/// extended.  The conversion must not be narrowing.
2192const SCEV *
2193ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2194  const Type *SrcTy = V->getType();
2195  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2196         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2197         "Cannot noop or zero extend with non-integer arguments!");
2198  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2199         "getNoopOrZeroExtend cannot truncate!");
2200  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2201    return V;  // No conversion
2202  return getZeroExtendExpr(V, Ty);
2203}
2204
2205/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2206/// input value to the specified type.  If the type must be extended, it is sign
2207/// extended.  The conversion must not be narrowing.
2208const SCEV *
2209ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2210  const Type *SrcTy = V->getType();
2211  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2212         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2213         "Cannot noop or sign extend with non-integer arguments!");
2214  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2215         "getNoopOrSignExtend cannot truncate!");
2216  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2217    return V;  // No conversion
2218  return getSignExtendExpr(V, Ty);
2219}
2220
2221/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2222/// the input value to the specified type. If the type must be extended,
2223/// it is extended with unspecified bits. The conversion must not be
2224/// narrowing.
2225const SCEV *
2226ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2227  const Type *SrcTy = V->getType();
2228  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2229         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2230         "Cannot noop or any extend with non-integer arguments!");
2231  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2232         "getNoopOrAnyExtend cannot truncate!");
2233  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2234    return V;  // No conversion
2235  return getAnyExtendExpr(V, Ty);
2236}
2237
2238/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2239/// input value to the specified type.  The conversion must not be widening.
2240const SCEV *
2241ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2242  const Type *SrcTy = V->getType();
2243  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2244         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2245         "Cannot truncate or noop with non-integer arguments!");
2246  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2247         "getTruncateOrNoop cannot extend!");
2248  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2249    return V;  // No conversion
2250  return getTruncateExpr(V, Ty);
2251}
2252
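// Summary of the conversion helpers above (illustrative, not part of the
// original source), where n and m are the bitwidths of V and Ty:
//
//   getTruncateOrZeroExtend:  trunc if n > m, zext if n < m, else no-op
//   getTruncateOrSignExtend:  trunc if n > m, sext if n < m, else no-op
//   getNoopOrZeroExtend:      asserts n <= m; zext or no-op
//   getNoopOrSignExtend:      asserts n <= m; sext or no-op
//   getNoopOrAnyExtend:       asserts n <= m; anyext or no-op
//   getTruncateOrNoop:        asserts n >= m; trunc or no-op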
2253/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2254/// the types using zero-extension, and then perform a umax operation
2255/// with them.
2256const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2257                                                        const SCEV *RHS) {
2258  const SCEV *PromotedLHS = LHS;
2259  const SCEV *PromotedRHS = RHS;
2260
2261  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2262    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2263  else
2264    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2265
2266  return getUMaxExpr(PromotedLHS, PromotedRHS);
2267}
2268
2269/// getUMinFromMismatchedTypes - Promote the operands to the wider of
2270/// the types using zero-extension, and then perform a umin operation
2271/// with them.
2272const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2273                                                        const SCEV *RHS) {
2274  const SCEV *PromotedLHS = LHS;
2275  const SCEV *PromotedRHS = RHS;
2276
2277  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2278    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2279  else
2280    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2281
2282  return getUMinExpr(PromotedLHS, PromotedRHS);
2283}
2284
2285/// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value for
2286/// the specified instruction and replaces any references to the symbolic value
2287/// SymName with the specified value.  This is used during PHI resolution.
2288void
2289ScalarEvolution::ReplaceSymbolicValueWithConcrete(Instruction *I,
2290                                                  const SCEV *SymName,
2291                                                  const SCEV *NewVal) {
2292  std::map<SCEVCallbackVH, const SCEV *>::iterator SI =
2293    Scalars.find(SCEVCallbackVH(I, this));
2294  if (SI == Scalars.end()) return;
2295
2296  const SCEV *NV =
2297    SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this);
2298  if (NV == SI->second) return;  // No change.
2299
2300  SI->second = NV;       // Update the scalars map!
2301
2302  // Any instruction values that use this instruction might also need to be
2303  // updated!
2304  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
2305       UI != E; ++UI)
2306    ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal);
2307}
2308
2309/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2310/// a loop header, making it a potential recurrence, or it doesn't.
2311///
2312const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2313  if (PN->getNumIncomingValues() == 2)  // The loops have been canonicalized.
2314    if (const Loop *L = LI->getLoopFor(PN->getParent()))
2315      if (L->getHeader() == PN->getParent()) {
2316        // If it lives in the loop header, it has two incoming values, one
2317        // from outside the loop, and one from inside.
2318        unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2319        unsigned BackEdge     = IncomingEdge^1;
2320
2321        // While we are analyzing this PHI node, handle its value symbolically.
2322        const SCEV *SymbolicName = getUnknown(PN);
2323        assert(Scalars.find(PN) == Scalars.end() &&
2324               "PHI node already processed?");
2325        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2326
2327        // Using this symbolic name for the PHI, analyze the value coming around
2328        // the back-edge.
2329        const SCEV *BEValue = getSCEV(PN->getIncomingValue(BackEdge));
2330
2331        // NOTE: If BEValue is loop invariant, we know that the PHI node just
2332        // has a special value for the first iteration of the loop.
2333
2334        // If the value coming around the backedge is an add with the symbolic
2335        // value we just inserted, then we found a simple induction variable!
2336        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2337          // If there is a single occurrence of the symbolic value, replace it
2338          // with a recurrence.
2339          unsigned FoundIndex = Add->getNumOperands();
2340          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2341            if (Add->getOperand(i) == SymbolicName)
2342              if (FoundIndex == e) {
2343                FoundIndex = i;
2344                break;
2345              }
2346
2347          if (FoundIndex != Add->getNumOperands()) {
2348            // Create an add with everything but the specified operand.
2349            SmallVector<const SCEV *, 8> Ops;
2350            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2351              if (i != FoundIndex)
2352                Ops.push_back(Add->getOperand(i));
2353            const SCEV *Accum = getAddExpr(Ops);
2354
2355            // This is not a valid addrec if the step amount varies on each
2356            // loop iteration but is not itself an addrec in this loop.
2357            if (Accum->isLoopInvariant(L) ||
2358                (isa<SCEVAddRecExpr>(Accum) &&
2359                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2360              const SCEV *StartVal =
2361                getSCEV(PN->getIncomingValue(IncomingEdge));
2362              const SCEV *PHISCEV =
2363                getAddRecExpr(StartVal, Accum, L);
2364
2365              // Okay, for the entire analysis of this edge we assumed the PHI
2366              // to be symbolic.  We now need to go back and update all of the
2367              // entries for the scalars that use the PHI (except for the PHI
2368              // itself) to use the new analyzed value instead of the "symbolic"
2369              // value.
2370              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2371              return PHISCEV;
2372            }
2373          }
2374        } else if (const SCEVAddRecExpr *AddRec =
2375                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
2376          // Otherwise, this could be a loop like this:
2377          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2378          // In this case, j = {1,+,1}  and BEValue is j.
2379          // Because the other in-value of i (0) fits the evolution of BEValue,
2380          // i really is an addrec evolution.
2381          if (AddRec->getLoop() == L && AddRec->isAffine()) {
2382            const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2383
2384            // If StartVal = j.start - j.stride, we can use StartVal as the
2385            // start of the addrec evolution.
2386            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2387                                            AddRec->getOperand(1))) {
2388              const SCEV *PHISCEV =
2389                 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2390
2391              // Okay, for the entire analysis of this edge we assumed the PHI
2392              // to be symbolic.  We now need to go back and update all of the
2393              // entries for the scalars that use the PHI (except for the PHI
2394              // itself) to use the new analyzed value instead of the "symbolic"
2395              // value.
2396              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2397              return PHISCEV;
2398            }
2399          }
2400        }
2401
2402        return SymbolicName;
2403      }
2404
2405  // If it's not a loop phi, we can't handle it yet.
2406  return getUnknown(PN);
2407}
2408
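// Worked example of the PHI recognition above (illustrative, not part of
// the original source): for the canonical induction variable
//
//   loop:
//     %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
//     %i.next = add i32 %i, 1
//
// the back-edge value analyzes to (SymbolicName + 1), the symbolic operand
// is found, Accum = 1 is loop invariant, and the PHI is resolved to the
// recurrence {0,+,1}<loop>.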
2409/// createNodeForGEP - Expand GEP instructions into add and multiply
2410/// operations. This allows them to be analyzed by regular SCEV code.
2411///
2412const SCEV *ScalarEvolution::createNodeForGEP(User *GEP) {
2413
2414  const Type *IntPtrTy = TD->getIntPtrType();
2415  Value *Base = GEP->getOperand(0);
2416  // Don't attempt to analyze GEPs over unsized objects.
2417  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2418    return getUnknown(GEP);
2419  const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
2420  gep_type_iterator GTI = gep_type_begin(GEP);
2421  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2422                                      E = GEP->op_end();
2423       I != E; ++I) {
2424    Value *Index = *I;
2425    // Compute the (potentially symbolic) offset in bytes for this index.
2426    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2427      // For a struct, add the member offset.
2428      const StructLayout &SL = *TD->getStructLayout(STy);
2429      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2430      uint64_t Offset = SL.getElementOffset(FieldNo);
2431      TotalOffset = getAddExpr(TotalOffset, getIntegerSCEV(Offset, IntPtrTy));
2432    } else {
2433      // For an array, add the element offset, explicitly scaled.
2434      const SCEV *LocalOffset = getSCEV(Index);
2435      if (!isa<PointerType>(LocalOffset->getType()))
2436        // Getelementptr indices are signed.
2437        LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
2438      LocalOffset =
2439        getMulExpr(LocalOffset,
2440                   getIntegerSCEV(TD->getTypeAllocSize(*GTI), IntPtrTy));
2441      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2442    }
2443  }
2444  return getAddExpr(getSCEV(Base), TotalOffset);
2445}
2446
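// Worked example of the GEP expansion above (illustrative, not part of the
// original source, assuming 32-bit pointers and the usual struct layout):
//
//   %p = getelementptr { i32, [10 x i16] }* %base, i32 0, i32 1, i32 %i
//
// The leading zero index contributes nothing, the struct index contributes
// the constant field offset 4, and the array index contributes %i (sign
// extended to the pointer width if narrower) scaled by the i16 allocation
// size, giving
//
//   %p  -->  %base + 4 + (2 * %i)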
2447/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2448/// guaranteed to end in (at every loop iteration).  It is, at the same time,
2449/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2450/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
2451uint32_t
2452ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2453  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2454    return C->getValue()->getValue().countTrailingZeros();
2455
2456  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2457    return std::min(GetMinTrailingZeros(T->getOperand()),
2458                    (uint32_t)getTypeSizeInBits(T->getType()));
2459
2460  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2461    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2462    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2463             getTypeSizeInBits(E->getType()) : OpRes;
2464  }
2465
2466  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2467    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2468    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2469             getTypeSizeInBits(E->getType()) : OpRes;
2470  }
2471
2472  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2473    // The result is the min of all operands' results.
2474    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2475    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2476      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2477    return MinOpRes;
2478  }
2479
2480  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2481    // The result is the sum of all operands' results.
2482    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2483    uint32_t BitWidth = getTypeSizeInBits(M->getType());
2484    for (unsigned i = 1, e = M->getNumOperands();
2485         SumOpRes != BitWidth && i != e; ++i)
2486      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2487                          BitWidth);
2488    return SumOpRes;
2489  }
2490
2491  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2492    // The result is the min of all operands' results.
2493    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2494    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2495      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2496    return MinOpRes;
2497  }
2498
2499  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2500    // The result is the min of all operands' results.
2501    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2502    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2503      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2504    return MinOpRes;
2505  }
2506
2507  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2508    // The result is the min of all operands' results.
2509    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2510    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2511      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2512    return MinOpRes;
2513  }
2514
2515  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2516    // For a SCEVUnknown, ask ValueTracking.
2517    unsigned BitWidth = getTypeSizeInBits(U->getType());
2518    APInt Mask = APInt::getAllOnesValue(BitWidth);
2519    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2520    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
2521    return Zeros.countTrailingOnes();
2522  }
2523
2524  // SCEVUDivExpr
2525  return 0;
2526}
2527
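// Worked examples of the rules above (illustrative, not part of the
// original source), where tz(x) denotes the minimum number of trailing
// zero bits provable for x:
//
//   GetMinTrailingZeros(12)       = 2               (constant)
//   GetMinTrailingZeros({4,+,8})  = min(2, 3) = 2   (min over operands)
//   GetMinTrailingZeros(4 * x)    = 2 + tz(x)       (sum, capped at the
//                                                    bitwidth)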
2528/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
2529///
2530ConstantRange
2531ScalarEvolution::getUnsignedRange(const SCEV *S) {
2532
2533  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2534    return ConstantRange(C->getValue()->getValue());
2535
2536  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2537    ConstantRange X = getUnsignedRange(Add->getOperand(0));
2538    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2539      X = X.add(getUnsignedRange(Add->getOperand(i)));
2540    return X;
2541  }
2542
2543  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2544    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
2545    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2546      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
2547    return X;
2548  }
2549
2550  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2551    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
2552    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2553      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
2554    return X;
2555  }
2556
2557  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2558    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
2559    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2560      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
2561    return X;
2562  }
2563
2564  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2565    ConstantRange X = getUnsignedRange(UDiv->getLHS());
2566    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
2567    return X.udiv(Y);
2568  }
2569
2570  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2571    ConstantRange X = getUnsignedRange(ZExt->getOperand());
2572    return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
2573  }
2574
2575  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2576    ConstantRange X = getUnsignedRange(SExt->getOperand());
2577    return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
2578  }
2579
2580  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2581    ConstantRange X = getUnsignedRange(Trunc->getOperand());
2582    return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
2583  }
2584
2585  ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
2586
2587  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2588    const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
2589    const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
2590    if (!Trip) return FullSet;
2591
2592    // TODO: non-affine addrec
2593    if (AddRec->isAffine()) {
2594      const Type *Ty = AddRec->getType();
2595      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2596      if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
2597        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2598
2599        const SCEV *Start = AddRec->getStart();
2600        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2601
2602        // Check for overflow.
2603        if (!isKnownPredicate(ICmpInst::ICMP_ULE, Start, End))
2604          return FullSet;
2605
2606        ConstantRange StartRange = getUnsignedRange(Start);
2607        ConstantRange EndRange = getUnsignedRange(End);
2608        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
2609                                   EndRange.getUnsignedMin());
2610        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
2611                                   EndRange.getUnsignedMax());
2612        if (Min.isMinValue() && Max.isMaxValue())
2613          return ConstantRange(Min.getBitWidth(), /*isFullSet=*/true);
2614        return ConstantRange(Min, Max+1);
2615      }
2616    }
2617  }
2618
2619  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2620    // For a SCEVUnknown, ask ValueTracking.
2621    unsigned BitWidth = getTypeSizeInBits(U->getType());
2622    APInt Mask = APInt::getAllOnesValue(BitWidth);
2623    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2624    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2625    return ConstantRange(Ones, ~Zeros);
2626  }
2627
2628  return FullSet;
2629}
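// For example, for {1,+,1} in a loop with a constant backedge-taken count
// of 9: Start = 1, End = 1 + 9*1 = 10, both ranges are singletons, and the
// result is [1,11), i.e. the values 1 through 10.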
2630
2631/// getSignedRange - Determine the signed range for a particular SCEV.
2632///
2633ConstantRange
2634ScalarEvolution::getSignedRange(const SCEV *S) {
2635
2636  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2637    return ConstantRange(C->getValue()->getValue());
2638
2639  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2640    ConstantRange X = getSignedRange(Add->getOperand(0));
2641    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2642      X = X.add(getSignedRange(Add->getOperand(i)));
2643    return X;
2644  }
2645
2646  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2647    ConstantRange X = getSignedRange(Mul->getOperand(0));
2648    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2649      X = X.multiply(getSignedRange(Mul->getOperand(i)));
2650    return X;
2651  }
2652
2653  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2654    ConstantRange X = getSignedRange(SMax->getOperand(0));
2655    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2656      X = X.smax(getSignedRange(SMax->getOperand(i)));
2657    return X;
2658  }
2659
2660  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2661    ConstantRange X = getSignedRange(UMax->getOperand(0));
2662    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2663      X = X.umax(getSignedRange(UMax->getOperand(i)));
2664    return X;
2665  }
2666
2667  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2668    ConstantRange X = getSignedRange(UDiv->getLHS());
2669    ConstantRange Y = getSignedRange(UDiv->getRHS());
2670    return X.udiv(Y);
2671  }
2672
2673  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2674    ConstantRange X = getSignedRange(ZExt->getOperand());
2675    return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
2676  }
2677
2678  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2679    ConstantRange X = getSignedRange(SExt->getOperand());
2680    return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
2681  }
2682
2683  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2684    ConstantRange X = getSignedRange(Trunc->getOperand());
2685    return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
2686  }
2687
2688  ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
2689
2690  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2691    const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
2692    const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
2693    if (!Trip) return FullSet;
2694
2695    // TODO: non-affine addrec
2696    if (AddRec->isAffine()) {
2697      const Type *Ty = AddRec->getType();
2698      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2699      if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
2700        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2701
2702        const SCEV *Start = AddRec->getStart();
2703        const SCEV *Step = AddRec->getStepRecurrence(*this);
2704        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2705
2706        // Check for overflow.
2707        if (!(isKnownPositive(Step) &&
2708              isKnownPredicate(ICmpInst::ICMP_SLT, Start, End)) &&
2709            !(isKnownNegative(Step) &&
2710              isKnownPredicate(ICmpInst::ICMP_SGT, Start, End)))
2711          return FullSet;
2712
2713        ConstantRange StartRange = getSignedRange(Start);
2714        ConstantRange EndRange = getSignedRange(End);
2715        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
2716                                   EndRange.getSignedMin());
2717        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
2718                                   EndRange.getSignedMax());
2719        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
2720          return ConstantRange(Min.getBitWidth(), /*isFullSet=*/true);
2721        return ConstantRange(Min, Max+1);
2722      }
2723    }
2724  }
2725
2726  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2727    // For a SCEVUnknown, ask ValueTracking.
2728    unsigned BitWidth = getTypeSizeInBits(U->getType());
2729    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
2730    if (NS == 1)
2731      return FullSet;
2732    return
2733      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
2734                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1);
2735  }
2736
2737  return FullSet;
2738}
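// For example, an i32 value known to have 24 sign bits effectively fits in
// 9 bits, so the SCEVUnknown case above yields the range [-256, 256).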
2739
2740/// createSCEV - We know that there is no SCEV for the specified value.
2741/// Analyze the expression.
2742///
2743const SCEV *ScalarEvolution::createSCEV(Value *V) {
2744  if (!isSCEVable(V->getType()))
2745    return getUnknown(V);
2746
2747  unsigned Opcode = Instruction::UserOp1;
2748  if (Instruction *I = dyn_cast<Instruction>(V))
2749    Opcode = I->getOpcode();
2750  else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
2751    Opcode = CE->getOpcode();
2752  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
2753    return getConstant(CI);
2754  else if (isa<ConstantPointerNull>(V))
2755    return getIntegerSCEV(0, V->getType());
2756  else if (isa<UndefValue>(V))
2757    return getIntegerSCEV(0, V->getType());
2758  else
2759    return getUnknown(V);
2760
2761  User *U = cast<User>(V);
2762  switch (Opcode) {
2763  case Instruction::Add:
2764    return getAddExpr(getSCEV(U->getOperand(0)),
2765                      getSCEV(U->getOperand(1)));
2766  case Instruction::Mul:
2767    return getMulExpr(getSCEV(U->getOperand(0)),
2768                      getSCEV(U->getOperand(1)));
2769  case Instruction::UDiv:
2770    return getUDivExpr(getSCEV(U->getOperand(0)),
2771                       getSCEV(U->getOperand(1)));
2772  case Instruction::Sub:
2773    return getMinusSCEV(getSCEV(U->getOperand(0)),
2774                        getSCEV(U->getOperand(1)));
2775  case Instruction::And:
2776    // For an expression like x&255 that merely masks off the high bits,
2777    // use zext(trunc(x)) as the SCEV expression.
2778    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2779      if (CI->isNullValue())
2780        return getSCEV(U->getOperand(1));
2781      if (CI->isAllOnesValue())
2782        return getSCEV(U->getOperand(0));
2783      const APInt &A = CI->getValue();
2784
2785      // Instcombine's ShrinkDemandedConstant may strip bits out of
2786      // constants, obscuring what would otherwise be a low-bits mask.
2787      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
2788      // knew about to reconstruct a low-bits mask value.
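      // For example, if bit 0 of the operand is known zero, x & 255 may
      // have been shrunk to x & 254; LZ is still 24, KnownZero covers
      // bit 0, so (~A & ~KnownZero) has no bits inside the low-8-bit
      // EffectiveMask and we can still form zext(trunc(x to i8) to i32).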
2789      unsigned LZ = A.countLeadingZeros();
2790      unsigned BitWidth = A.getBitWidth();
2791      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
2792      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2793      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
2794
2795      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
2796
2797      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
2798        return
2799          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
2800                                            IntegerType::get(BitWidth - LZ)),
2801                            U->getType());
2802    }
2803    break;
2804
2805  case Instruction::Or:
2806    // If the RHS of the Or is a constant, we may have something like:
2807    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
2808    // optimizations will transparently handle this case.
2809    //
2810    // In order for this transformation to be safe, the LHS must be of the
2811    // form X*(2^n) and the Or constant must be less than 2^n.
2812    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2813      const SCEV *LHS = getSCEV(U->getOperand(0));
2814      const APInt &CIVal = CI->getValue();
2815      if (GetMinTrailingZeros(LHS) >=
2816          (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
2817        return getAddExpr(LHS, getSCEV(U->getOperand(1)));
2818    }
2819    break;
2820  case Instruction::Xor:
2821    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2822      // If the RHS of the xor is a signbit, then this is just an add.
2823      // Instcombine turns add of signbit into xor as a strength reduction step.
2824      if (CI->getValue().isSignBit())
2825        return getAddExpr(getSCEV(U->getOperand(0)),
2826                          getSCEV(U->getOperand(1)));
2827
2828      // If the RHS of xor is -1, then this is a not operation.
2829      if (CI->isAllOnesValue())
2830        return getNotSCEV(getSCEV(U->getOperand(0)));
2831
2832      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
2833      // This is a variant of the check for xor with -1, and it handles
2834      // the case where instcombine has trimmed non-demanded bits out
2835      // of an xor with -1.
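      // For example, if T = and i32 %x, 255 (whose SCEV is
      // zext(trunc(%x) to i8) to i32), then xor T, 255 is modeled as
      // zext(not(trunc(%x) to i8)) to i32.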
2836      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
2837        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
2838          if (BO->getOpcode() == Instruction::And &&
2839              LCI->getValue() == CI->getValue())
2840            if (const SCEVZeroExtendExpr *Z =
2841                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
2842              const Type *UTy = U->getType();
2843              const SCEV *Z0 = Z->getOperand();
2844              const Type *Z0Ty = Z0->getType();
2845              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
2846
2847              // If C is a low-bits mask, the zero extend is serving to
2848              // mask off the high bits. Complement the operand and
2849              // re-apply the zext.
2850              if (APIntOps::isMask(Z0TySize, CI->getValue()))
2851                return getZeroExtendExpr(getNotSCEV(Z0), UTy);
2852
2853              // If C is a single bit, it may be in the sign-bit position
2854              // before the zero-extend. In this case, represent the xor
2855              // using an add, which is equivalent, and re-apply the zext.
2856              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
2857              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
2858                  Trunc.isSignBit())
2859                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
2860                                         UTy);
2861            }
2862    }
2863    break;
2864
2865  case Instruction::Shl:
2866    // Turn shift left of a constant amount into a multiply.
2867    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2868      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2869      Constant *X = ConstantInt::get(
2870        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2871      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2872    }
2873    break;
2874
2875  case Instruction::LShr:
2876    // Turn logical shift right of a constant into an unsigned divide.
2877    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2878      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2879      Constant *X = ConstantInt::get(
2880        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2881      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2882    }
2883    break;
2884
2885  case Instruction::AShr:
2886    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
2887    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2888      if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2889        if (L->getOpcode() == Instruction::Shl &&
2890            L->getOperand(1) == U->getOperand(1)) {
2891          unsigned BitWidth = getTypeSizeInBits(U->getType());
2892          uint64_t Amt = BitWidth - CI->getZExtValue();
2893          if (Amt == BitWidth)
2894            return getSCEV(L->getOperand(0));       // shift by zero --> noop
2895          if (Amt > BitWidth)
2896            return getIntegerSCEV(0, U->getType()); // value is undefined
2897          return
2898            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2899                                                      IntegerType::get(Amt)),
2900                                 U->getType());
2901        }
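    // For example, in i32, (ashr (shl %x, 24), 24) sign-extends the low 8
    // bits of %x: Amt = 32 - 24 = 8, giving sext(trunc(%x) to i8) to i32.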
2902    break;
2903
2904  case Instruction::Trunc:
2905    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2906
2907  case Instruction::ZExt:
2908    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2909
2910  case Instruction::SExt:
2911    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2912
2913  case Instruction::BitCast:
2914    // BitCasts are no-op casts so we just eliminate the cast.
2915    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
2916      return getSCEV(U->getOperand(0));
2917    break;
2918
2919  case Instruction::IntToPtr:
2920    if (!TD) break; // Without TD we can't analyze pointers.
2921    return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2922                                   TD->getIntPtrType());
2923
2924  case Instruction::PtrToInt:
2925    if (!TD) break; // Without TD we can't analyze pointers.
2926    return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2927                                   U->getType());
2928
2929  case Instruction::GetElementPtr:
2930    if (!TD) break; // Without TD we can't analyze pointers.
2931    return createNodeForGEP(U);
2932
2933  case Instruction::PHI:
2934    return createNodeForPHI(cast<PHINode>(U));
2935
2936  case Instruction::Select:
2937    // This could be a smax or umax that was lowered earlier.
2938    // Try to recover it.
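    // For example, select (icmp sgt %a, %b), %a, %b is smax(%a, %b), and
    // select (icmp ult %a, %b), %a, %b is umin(%a, %b).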
2939    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
2940      Value *LHS = ICI->getOperand(0);
2941      Value *RHS = ICI->getOperand(1);
2942      switch (ICI->getPredicate()) {
2943      case ICmpInst::ICMP_SLT:
2944      case ICmpInst::ICMP_SLE:
2945        std::swap(LHS, RHS);
2946        // fall through
2947      case ICmpInst::ICMP_SGT:
2948      case ICmpInst::ICMP_SGE:
2949        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2950          return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
2951        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2952          return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
2953        break;
2954      case ICmpInst::ICMP_ULT:
2955      case ICmpInst::ICMP_ULE:
2956        std::swap(LHS, RHS);
2957        // fall through
2958      case ICmpInst::ICMP_UGT:
2959      case ICmpInst::ICMP_UGE:
2960        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2961          return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
2962        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2963          return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
2964        break;
2965      case ICmpInst::ICMP_NE:
2966        // n != 0 ? n : 1  ->  umax(n, 1)
2967        if (LHS == U->getOperand(1) &&
2968            isa<ConstantInt>(U->getOperand(2)) &&
2969            cast<ConstantInt>(U->getOperand(2))->isOne() &&
2970            isa<ConstantInt>(RHS) &&
2971            cast<ConstantInt>(RHS)->isZero())
2972          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
2973        break;
2974      case ICmpInst::ICMP_EQ:
2975        // n == 0 ? 1 : n  ->  umax(n, 1)
2976        if (LHS == U->getOperand(2) &&
2977            isa<ConstantInt>(U->getOperand(1)) &&
2978            cast<ConstantInt>(U->getOperand(1))->isOne() &&
2979            isa<ConstantInt>(RHS) &&
2980            cast<ConstantInt>(RHS)->isZero())
2981          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
2982        break;
2983      default:
2984        break;
2985      }
2986    }
2987
2988  default: // We cannot analyze this expression.
2989    break;
2990  }
2991
2992  return getUnknown(V);
2993}
2994
2995
2996
2997//===----------------------------------------------------------------------===//
2998//                   Iteration Count Computation Code
2999//
3000
3001/// getBackedgeTakenCount - If the specified loop has a predictable
3002/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3003/// object. The backedge-taken count is the number of times the loop header
3004/// will be branched to from within the loop. This is one less than the
3005/// trip count of the loop, since it doesn't count the first iteration,
3006/// when the header is branched to from outside the loop.
3007///
3008/// Note that it is not valid to call this method on a loop without a
3009/// loop-invariant backedge-taken count (see
3010/// hasLoopInvariantBackedgeTakenCount).
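///
/// For example, a loop whose header executes 10 times has a trip count of
/// 10 and a backedge-taken count of 9.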
3011///
3012const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3013  return getBackedgeTakenInfo(L).Exact;
3014}
3015
3016/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3017/// return the least SCEV value that is known never to be less than the
3018/// actual backedge taken count.
3019const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3020  return getBackedgeTakenInfo(L).Max;
3021}
3022
3023/// PushLoopPHIs - Push PHI nodes in the header of the given loop
3024/// onto the given Worklist.
3025static void
3026PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3027  BasicBlock *Header = L->getHeader();
3028
3029  // Push all Loop-header PHIs onto the Worklist stack.
3030  for (BasicBlock::iterator I = Header->begin();
3031       PHINode *PN = dyn_cast<PHINode>(I); ++I)
3032    Worklist.push_back(PN);
3033}
3034
3035/// PushDefUseChildren - Push users of the given Instruction
3036/// onto the given Worklist.
3037static void
3038PushDefUseChildren(Instruction *I,
3039                   SmallVectorImpl<Instruction *> &Worklist) {
3040  // Push the def-use children onto the Worklist stack.
3041  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
3042       UI != UE; ++UI)
3043    Worklist.push_back(cast<Instruction>(UI));
3044}
3045
3046const ScalarEvolution::BackedgeTakenInfo &
3047ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3048  // Initially insert a CouldNotCompute for this loop. If the insertion
3049  // succeeds, proceed to actually compute a backedge-taken count and
3050  // update the value. The temporary CouldNotCompute value tells SCEV
3051  // code elsewhere that it shouldn't attempt to request a new
3052  // backedge-taken count, which could result in infinite recursion.
3053  std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
3054    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3055  if (Pair.second) {
3056    BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
3057    if (ItCount.Exact != getCouldNotCompute()) {
3058      assert(ItCount.Exact->isLoopInvariant(L) &&
3059             ItCount.Max->isLoopInvariant(L) &&
3060             "Computed trip count isn't loop invariant for loop!");
3061      ++NumTripCountsComputed;
3062
3063      // Update the value in the map.
3064      Pair.first->second = ItCount;
3065    } else {
3066      if (ItCount.Max != getCouldNotCompute())
3067        // Update the value in the map.
3068        Pair.first->second = ItCount;
3069      if (isa<PHINode>(L->getHeader()->begin()))
3070        // Only count loops that have phi nodes as not being computable.
3071        ++NumTripCountsNotComputed;
3072    }
3073
3074    // Now that we know more about the trip count for this loop, forget any
3075    // existing SCEV values for PHI nodes in this loop since they are only
3076    // conservative estimates made without the benefit of trip count
3077    // information. This is similar to the code in
3078    // forgetLoopBackedgeTakenCount, except that it handles SCEVUnknown PHI
3079    // nodes specially.
3080    if (ItCount.hasAnyInfo()) {
3081      SmallVector<Instruction *, 16> Worklist;
3082      PushLoopPHIs(L, Worklist);
3083
3084      SmallPtrSet<Instruction *, 8> Visited;
3085      while (!Worklist.empty()) {
3086        Instruction *I = Worklist.pop_back_val();
3087        if (!Visited.insert(I)) continue;
3088
3089        std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3090          Scalars.find(static_cast<Value *>(I));
3091        if (It != Scalars.end()) {
3092          // SCEVUnknown for a PHI either means that it has an unrecognized
3093          // structure, or it's a PHI that's in the process of being computed
3094          // by createNodeForPHI.  In the former case, additional loop trip
3095          // count information isn't going to change anything. In the latter
3096          // case, createNodeForPHI will perform the necessary updates on its
3097          // own when it gets to that point.
3098          if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
3099            Scalars.erase(It);
3100          ValuesAtScopes.erase(I);
3101          if (PHINode *PN = dyn_cast<PHINode>(I))
3102            ConstantEvolutionLoopExitValue.erase(PN);
3103        }
3104
3105        PushDefUseChildren(I, Worklist);
3106      }
3107    }
3108  }
3109  return Pair.first->second;
3110}
3111
3112/// forgetLoopBackedgeTakenCount - This method should be called by the
3113/// client when it has changed a loop in a way that may affect
3114/// ScalarEvolution's ability to compute a trip count, or if the loop
3115/// is deleted.
3116void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
3117  BackedgeTakenCounts.erase(L);
3118
3119  SmallVector<Instruction *, 16> Worklist;
3120  PushLoopPHIs(L, Worklist);
3121
3122  SmallPtrSet<Instruction *, 8> Visited;
3123  while (!Worklist.empty()) {
3124    Instruction *I = Worklist.pop_back_val();
3125    if (!Visited.insert(I)) continue;
3126
3127    std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3128      Scalars.find(static_cast<Value *>(I));
3129    if (It != Scalars.end()) {
3130      Scalars.erase(It);
3131      ValuesAtScopes.erase(I);
3132      if (PHINode *PN = dyn_cast<PHINode>(I))
3133        ConstantEvolutionLoopExitValue.erase(PN);
3134    }
3135
3136    PushDefUseChildren(I, Worklist);
3137  }
3138}
3139
3140/// ComputeBackedgeTakenCount - Compute the number of times the backedge
3141/// of the specified loop will execute.
3142ScalarEvolution::BackedgeTakenInfo
3143ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3144  SmallVector<BasicBlock*, 8> ExitingBlocks;
3145  L->getExitingBlocks(ExitingBlocks);
3146
3147  // Examine all exits and pick the most conservative values.
3148  const SCEV *BECount = getCouldNotCompute();
3149  const SCEV *MaxBECount = getCouldNotCompute();
3150  bool CouldNotComputeBECount = false;
3151  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3152    BackedgeTakenInfo NewBTI =
3153      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3154
3155    if (NewBTI.Exact == getCouldNotCompute()) {
3156      // We couldn't compute an exact value for this exit, so
3157      // we won't be able to compute an exact value for the loop.
3158      CouldNotComputeBECount = true;
3159      BECount = getCouldNotCompute();
3160    } else if (!CouldNotComputeBECount) {
3161      if (BECount == getCouldNotCompute())
3162        BECount = NewBTI.Exact;
3163      else
3164        BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3165    }
3166    if (MaxBECount == getCouldNotCompute())
3167      MaxBECount = NewBTI.Max;
3168    else if (NewBTI.Max != getCouldNotCompute())
3169      MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3170  }
3171
3172  return BackedgeTakenInfo(BECount, MaxBECount);
3173}
3174
3175/// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3176/// of the specified loop will execute if it exits via the specified block.
3177ScalarEvolution::BackedgeTakenInfo
3178ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3179                                                   BasicBlock *ExitingBlock) {
3180
3181  // Okay, we've chosen an exiting block.  See what condition causes us to
3182  // exit at this block.
3183  //
3184  // FIXME: we should be able to handle switch instructions (with a single exit)
3185  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3186  if (ExitBr == 0) return getCouldNotCompute();
3187  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3188
3189  // At this point, we know we have a conditional branch that determines whether
3190  // the loop is exited.  However, we don't know if the branch is executed each
3191  // time through the loop.  If not, then the execution count of the branch will
3192  // not be equal to the trip count of the loop.
3193  //
3194  // Currently we check for this by checking to see if the Exit branch goes to
3195  // the loop header.  If so, we know it will always execute the same number of
3196  // times as the loop.  We also handle the case where the exit block *is* the
3197  // loop header.  This is common for un-rotated loops.
3198  //
3199  // If both of those tests fail, walk up the unique predecessor chain to the
3200  // header, stopping if there is an edge that doesn't exit the loop. If the
3201  // header is reached, the execution count of the branch will be equal to the
3202  // trip count of the loop.
3203  //
3204  //  More extensive analysis could be done to handle more cases here.
3205  //
3206  if (ExitBr->getSuccessor(0) != L->getHeader() &&
3207      ExitBr->getSuccessor(1) != L->getHeader() &&
3208      ExitBr->getParent() != L->getHeader()) {
3209    // The simple checks failed, try climbing the unique predecessor chain
3210    // up to the header.
3211    bool Ok = false;
3212    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3213      BasicBlock *Pred = BB->getUniquePredecessor();
3214      if (!Pred)
3215        return getCouldNotCompute();
3216      TerminatorInst *PredTerm = Pred->getTerminator();
3217      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3218        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3219        if (PredSucc == BB)
3220          continue;
3221        // If the predecessor has a successor that isn't BB and isn't
3222        // outside the loop, assume the worst.
3223        if (L->contains(PredSucc))
3224          return getCouldNotCompute();
3225      }
3226      if (Pred == L->getHeader()) {
3227        Ok = true;
3228        break;
3229      }
3230      BB = Pred;
3231    }
3232    if (!Ok)
3233      return getCouldNotCompute();
3234  }
3235
3236  // Proceed to the next level to examine the exit condition expression.
3237  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3238                                               ExitBr->getSuccessor(0),
3239                                               ExitBr->getSuccessor(1));
3240}
3241
3242/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3243/// backedge of the specified loop will execute if its exit condition
3244/// were a conditional branch of ExitCond, TBB, and FBB.
3245ScalarEvolution::BackedgeTakenInfo
3246ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3247                                                       Value *ExitCond,
3248                                                       BasicBlock *TBB,
3249                                                       BasicBlock *FBB) {
3250  // Check if the controlling expression for this loop is an And or Or.
3251  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3252    if (BO->getOpcode() == Instruction::And) {
3253      // Recurse on the operands of the and.
3254      BackedgeTakenInfo BTI0 =
3255        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3256      BackedgeTakenInfo BTI1 =
3257        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3258      const SCEV *BECount = getCouldNotCompute();
3259      const SCEV *MaxBECount = getCouldNotCompute();
3260      if (L->contains(TBB)) {
3261        // Both conditions must be true for the loop to continue executing.
3262        // Choose the less conservative count.
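        // For example, a loop that runs while (i < n && i < m) exits as
        // soon as either test fails, so its exact count is the umin of the
        // counts for the two tests alone.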
3263        if (BTI0.Exact == getCouldNotCompute() ||
3264            BTI1.Exact == getCouldNotCompute())
3265          BECount = getCouldNotCompute();
3266        else
3267          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3268        if (BTI0.Max == getCouldNotCompute())
3269          MaxBECount = BTI1.Max;
3270        else if (BTI1.Max == getCouldNotCompute())
3271          MaxBECount = BTI0.Max;
3272        else
3273          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3274      } else {
3275        // Both conditions must be true for the loop to exit.
3276        assert(L->contains(FBB) && "Loop block has no successor in loop!");
3277        if (BTI0.Exact != getCouldNotCompute() &&
3278            BTI1.Exact != getCouldNotCompute())
3279          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3280        if (BTI0.Max != getCouldNotCompute() &&
3281            BTI1.Max != getCouldNotCompute())
3282          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3283      }
3284
3285      return BackedgeTakenInfo(BECount, MaxBECount);
3286    }
3287    if (BO->getOpcode() == Instruction::Or) {
3288      // Recurse on the operands of the or.
3289      BackedgeTakenInfo BTI0 =
3290        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3291      BackedgeTakenInfo BTI1 =
3292        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3293      const SCEV *BECount = getCouldNotCompute();
3294      const SCEV *MaxBECount = getCouldNotCompute();
3295      if (L->contains(FBB)) {
3296        // Both conditions must be false for the loop to continue executing.
3297        // Choose the less conservative count.
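        // For example, a loop that runs while both a and b are false exits
        // as soon as either becomes true, so again the smaller (umin) count
        // applies.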
3298        if (BTI0.Exact == getCouldNotCompute() ||
3299            BTI1.Exact == getCouldNotCompute())
3300          BECount = getCouldNotCompute();
3301        else
3302          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3303        if (BTI0.Max == getCouldNotCompute())
3304          MaxBECount = BTI1.Max;
3305        else if (BTI1.Max == getCouldNotCompute())
3306          MaxBECount = BTI0.Max;
3307        else
3308          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3309      } else {
3310        // Both conditions must be false for the loop to exit.
3311        assert(L->contains(TBB) && "Loop block has no successor in loop!");
3312        if (BTI0.Exact != getCouldNotCompute() &&
3313            BTI1.Exact != getCouldNotCompute())
3314          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3315        if (BTI0.Max != getCouldNotCompute() &&
3316            BTI1.Max != getCouldNotCompute())
3317          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3318      }
3319
3320      return BackedgeTakenInfo(BECount, MaxBECount);
3321    }
3322  }
3323
3324  // With an icmp, it may be feasible to compute an exact backedge-taken count.
3325  // Proceed to the next level to examine the icmp.
3326  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3327    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3328
3329  // If it's not an integer or pointer comparison then compute it the hard way.
3330  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3331}
3332
3333/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3334/// backedge of the specified loop will execute if its exit condition
3335/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
3336ScalarEvolution::BackedgeTakenInfo
3337ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3338                                                           ICmpInst *ExitCond,
3339                                                           BasicBlock *TBB,
3340                                                           BasicBlock *FBB) {
3341
3342  // If the condition was exit on true, convert the condition to exit on false
3343  ICmpInst::Predicate Cond;
3344  if (!L->contains(FBB))
3345    Cond = ExitCond->getPredicate();
3346  else
3347    Cond = ExitCond->getInversePredicate();
3348
3349  // Handle common loops like: for (X = "string"; *X; ++X)
3350  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3351    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3352      const SCEV *ItCnt =
3353        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3354      if (!isa<SCEVCouldNotCompute>(ItCnt)) {
3355        unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
3356        return BackedgeTakenInfo(ItCnt,
3357                                 isa<SCEVConstant>(ItCnt) ? ItCnt :
3358                                   getConstant(APInt::getMaxValue(BitWidth)-1));
3359      }
3360    }
3361
3362  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3363  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3364
3365  // Try to evaluate any dependencies out of the loop.
3366  LHS = getSCEVAtScope(LHS, L);
3367  RHS = getSCEVAtScope(RHS, L);
3368
3369  // At this point, we would like to compute the number of loop iterations
3370  // for which the predicate returns true, given these inputs.
3371  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3372    // LHS is loop-invariant but RHS is not; put the invariant on the RHS.
3373    std::swap(LHS, RHS);
3374    Cond = ICmpInst::getSwappedPredicate(Cond);
3375  }
3376
3377  // If we have a comparison of a chrec against a constant, try to use value
3378  // ranges to answer this query.
3379  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3380    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3381      if (AddRec->getLoop() == L) {
3382        // Form the constant range.
3383        ConstantRange CompRange(
3384            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3385
3386        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3387        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3388      }
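  // For example, with i = {0,+,1} and Cond = ICMP_ULT against a constant
  // 100, CompRange is [0,100); the chrec stays in that range for
  // iterations 0 through 99, so the backedge-taken count is 100.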
3389
3390  switch (Cond) {
3391  case ICmpInst::ICMP_NE: {                     // while (X != Y)
3392    // Convert to: while (X-Y != 0)
3393    const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3394    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3395    break;
3396  }
3397  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
3398    // Convert to: while (X-Y == 0)
3399    const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3400    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3401    break;
3402  }
3403  case ICmpInst::ICMP_SLT: {
3404    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3405    if (BTI.hasAnyInfo()) return BTI;
3406    break;
3407  }
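  // Note: x >s y is equivalent to ~x <s ~y (and likewise for the unsigned
  // case below), since ~x = -x-1 reverses the order; this lets the
  // greater-than cases reuse HowManyLessThans.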
3408  case ICmpInst::ICMP_SGT: {
3409    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3410                                             getNotSCEV(RHS), L, true);
3411    if (BTI.hasAnyInfo()) return BTI;
3412    break;
3413  }
3414  case ICmpInst::ICMP_ULT: {
3415    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3416    if (BTI.hasAnyInfo()) return BTI;
3417    break;
3418  }
3419  case ICmpInst::ICMP_UGT: {
3420    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3421                                             getNotSCEV(RHS), L, false);
3422    if (BTI.hasAnyInfo()) return BTI;
3423    break;
3424  }
3425  default:
3426#if 0
3427    errs() << "ComputeBackedgeTakenCount ";
3428    if (ExitCond->getOperand(0)->getType()->isUnsigned())
3429      errs() << "[unsigned] ";
3430    errs() << *LHS << "   "
3431         << Instruction::getOpcodeName(Instruction::ICmp)
3432         << "   " << *RHS << "\n";
3433#endif
3434    break;
3435  }
3436  return
3437    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3438}
3439
3440static ConstantInt *
3441EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3442                                ScalarEvolution &SE) {
3443  const SCEV *InVal = SE.getConstant(C);
3444  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
3445  assert(isa<SCEVConstant>(Val) &&
3446         "Evaluation of SCEV at constant didn't fold correctly?");
3447  return cast<SCEVConstant>(Val)->getValue();
3448}
3449
3450/// GetAddressedElementFromGlobal - Given a global variable with an initializer
3451/// and a GEP expression (missing the pointer index) indexing into it, return
3452/// the addressed element of the initializer or null if the index expression is
3453/// invalid.
3454static Constant *
3455GetAddressedElementFromGlobal(LLVMContext *Context, GlobalVariable *GV,
3456                              const std::vector<ConstantInt*> &Indices) {
3457  Constant *Init = GV->getInitializer();
3458  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3459    uint64_t Idx = Indices[i]->getZExtValue();
3460    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3461      assert(Idx < CS->getNumOperands() && "Bad struct index!");
3462      Init = cast<Constant>(CS->getOperand(Idx));
3463    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3464      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
3465      Init = cast<Constant>(CA->getOperand(Idx));
3466    } else if (isa<ConstantAggregateZero>(Init)) {
3467      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3468        assert(Idx < STy->getNumElements() && "Bad struct index!");
3469        Init = Context->getNullValue(STy->getElementType(Idx));
3470      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3471        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
3472        Init = Context->getNullValue(ATy->getElementType());
3473      } else {
3474        LLVM_UNREACHABLE("Unknown constant aggregate type!");
3475      }
3476      return 0;
3477    } else {
3478      return 0; // Unknown initializer type
3479    }
3480  }
3481  return Init;
3482}
3483
3484/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3485/// 'icmp op load X, cst', try to see if we can compute the backedge
3486/// execution count.
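/// A typical case is a scan over a constant global table, e.g.
/// for (i = 0; table[i] != 7; ++i), where each iteration's load can be
/// folded to a constant from the initializer.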
3487const SCEV *
3488ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3489                                                LoadInst *LI,
3490                                                Constant *RHS,
3491                                                const Loop *L,
3492                                                ICmpInst::Predicate predicate) {
3493  if (LI->isVolatile()) return getCouldNotCompute();
3494
3495  // Check to see if the loaded pointer is a getelementptr of a global.
3496  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3497  if (!GEP) return getCouldNotCompute();
3498
3499  // Make sure that it is really a constant global we are gepping, with an
3500  // initializer, and make sure the first IDX is really 0.
3501  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3502  if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
3503      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3504      !cast<Constant>(GEP->getOperand(1))->isNullValue())
3505    return getCouldNotCompute();
3506
3507  // Okay, we allow one non-constant index into the GEP instruction.
3508  Value *VarIdx = 0;
3509  std::vector<ConstantInt*> Indexes;
3510  unsigned VarIdxNum = 0;
3511  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3512    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3513      Indexes.push_back(CI);
3514    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
3515      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
3516      VarIdx = GEP->getOperand(i);
3517      VarIdxNum = i-2;
3518      Indexes.push_back(0);
3519    }
3520
3521  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3522  // Check to see if X is a loop-variant value now.
3523  const SCEV *Idx = getSCEV(VarIdx);
3524  Idx = getSCEVAtScope(Idx, L);
3525
3526  // We can only recognize very limited forms of loop index expressions, in
3527  // particular, only affine AddRec's like {C1,+,C2}.
3528  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3529  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3530      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3531      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3532    return getCouldNotCompute();
3533
3534  unsigned MaxSteps = MaxBruteForceIterations;
3535  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3536    ConstantInt *ItCst =
3537      ConstantInt::get(cast<IntegerType>(IdxExpr->getType()), IterationNum);
3538    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3539
3540    // Form the GEP offset.
3541    Indexes[VarIdxNum] = Val;
3542
3543    Constant *Result = GetAddressedElementFromGlobal(Context, GV, Indexes);
3544    if (Result == 0) break;  // Cannot compute!
3545
3546    // Evaluate the condition for this iteration.
3547    Result = ConstantExpr::getICmp(predicate, Result, RHS);
3548    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
3549    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3550#if 0
3551      errs() << "\n***\n*** Computed loop count " << *ItCst
3552             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3553             << "***\n";
3554#endif
3555      ++NumArrayLenItCounts;
3556      return getConstant(ItCst);   // Found terminating iteration!
3557    }
3558  }
3559  return getCouldNotCompute();
3560}
3561
3562
3563/// CanConstantFold - Return true if we can constant fold an instruction of the
3564/// specified type, assuming that all operands were constants.
3565static bool CanConstantFold(const Instruction *I) {
3566  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3567      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3568    return true;
3569
3570  if (const CallInst *CI = dyn_cast<CallInst>(I))
3571    if (const Function *F = CI->getCalledFunction())
3572      return canConstantFoldCallTo(F);
3573  return false;
3574}
3575
3576/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3577/// in the loop that V is derived from.  We allow arbitrary operations along the
3578/// way, but the operands of an operation must either be constants or a value
3579/// derived from a constant PHI.  If this expression does not fit with these
3580/// constraints, return null.
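/// For example, with %i = phi [ 0, %preheader ], [ %i.next, %latch ] in the
/// loop header and %i.next = add %i, 1, an expression such as (%i * 2) + 1
/// is derived from the single PHI %i.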
3581static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3582  // If this is not an instruction, or if this is an instruction outside of the
3583  // loop, it can't be derived from a loop PHI.
3584  Instruction *I = dyn_cast<Instruction>(V);
3585  if (I == 0 || !L->contains(I->getParent())) return 0;
3586
3587  if (PHINode *PN = dyn_cast<PHINode>(I)) {
3588    if (L->getHeader() == I->getParent())
3589      return PN;
3590    else
3591      // We don't currently keep track of the control flow needed to evaluate
3592      // PHIs, so we cannot handle PHIs inside of loops.
3593      return 0;
3594  }
3595
3596  // If we won't be able to constant fold this expression even if the operands
3597  // are constants, return early.
3598  if (!CanConstantFold(I)) return 0;
3599
3600  // Otherwise, we can evaluate this instruction if all of its operands are
3601  // constant or derived from a PHI node themselves.
3602  PHINode *PHI = 0;
3603  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3604    if (!(isa<Constant>(I->getOperand(Op)) ||
3605          isa<GlobalValue>(I->getOperand(Op)))) {
3606      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3607      if (P == 0) return 0;  // Not evolving from PHI
3608      if (PHI == 0)
3609        PHI = P;
3610      else if (PHI != P)
3611        return 0;  // Evolving from multiple different PHIs.
3612    }
3613
3614  // This is an expression evolving from a constant PHI!
3615  return PHI;
3616}
3617
3618/// EvaluateExpression - Given an expression that passes the
3619/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3620/// in the loop has the value PHIVal.  If we can't fold this expression for some
3621/// reason, return null.
3622static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
3623  if (isa<PHINode>(V)) return PHIVal;
3624  if (Constant *C = dyn_cast<Constant>(V)) return C;
3625  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3626  Instruction *I = cast<Instruction>(V);
3627  LLVMContext *Context = I->getParent()->getContext();
3628
3629  std::vector<Constant*> Operands;
3630  Operands.resize(I->getNumOperands());
3631
3632  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3633    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
3634    if (Operands[i] == 0) return 0;
3635  }
3636
3637  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3638    return ConstantFoldCompareInstOperands(CI->getPredicate(),
3639                                           &Operands[0], Operands.size(),
3640                                           Context);
3641  else
3642    return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3643                                    &Operands[0], Operands.size(),
3644                                    Context);
3645}
3646
3647/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
3648/// in the header of its containing loop, that the loop executes a
3649/// constant number of times, and that the PHI node is just a recurrence
3650/// involving constants, fold it.
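/// For example, a PHI that starts at 1 and doubles along the backedge
/// evaluates to 32 when the backedge-taken count is 5.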
3651Constant *
3652ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
3653                                                   const APInt& BEs,
3654                                                   const Loop *L) {
3655  std::map<PHINode*, Constant*>::iterator I =
3656    ConstantEvolutionLoopExitValue.find(PN);
3657  if (I != ConstantEvolutionLoopExitValue.end())
3658    return I->second;
3659
3660  if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
3661    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
3662
3663  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
3664
3665  // Since the loop is canonicalized, the PHI node must have two entries.  One
3666  // entry must be a constant (coming in from outside of the loop), and the
3667  // second must be derived from the same PHI.
3668  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3669  Constant *StartCST =
3670    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3671  if (StartCST == 0)
3672    return RetVal = 0;  // Must be a constant.
3673
3674  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3675  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3676  if (PN2 != PN)
3677    return RetVal = 0;  // Not derived from same PHI.
3678
3679  // Execute the loop symbolically to determine the exit value.
3680  if (BEs.getActiveBits() >= 32)
3681    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
3682
3683  unsigned NumIterations = BEs.getZExtValue(); // must be in range
3684  unsigned IterationNum = 0;
3685  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
3686    if (IterationNum == NumIterations)
3687      return RetVal = PHIVal;  // Got exit value!
3688
3689    // Compute the value of the PHI node for the next iteration.
3690    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3691    if (NextPHI == PHIVal)
3692      return RetVal = NextPHI;  // Stopped evolving!
3693    if (NextPHI == 0)
3694      return 0;        // Couldn't evaluate!
3695    PHIVal = NextPHI;
3696  }
3697}
3698
3699/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute
3700/// a constant number of times (the condition evolves only from constants),
3701/// try to evaluate a few iterations of the loop until the exit condition
3702/// gets a value of ExitWhen (true or false).  If we cannot evaluate the
3703/// trip count of the loop, return getCouldNotCompute().
3704const SCEV *
3705ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
3706                                                       Value *Cond,
3707                                                       bool ExitWhen) {
3708  PHINode *PN = getConstantEvolvingPHI(Cond, L);
3709  if (PN == 0) return getCouldNotCompute();
3710
3711  // Since the loop is canonicalized, the PHI node must have two entries.  One
3712  // entry must be a constant (coming in from outside of the loop), and the
3713  // second must be derived from the same PHI.
3714  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3715  Constant *StartCST =
3716    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3717  if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
3718
3719  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3720  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3721  if (PN2 != PN) return getCouldNotCompute();  // Not derived from same PHI.
3722
3723  // Okay, we found a PHI node that defines the trip count of this loop.  Execute
3724  // the loop symbolically to determine when the condition gets a value of
3725  // "ExitWhen".
3726  unsigned IterationNum = 0;
3727  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
3728  for (Constant *PHIVal = StartCST;
3729       IterationNum != MaxIterations; ++IterationNum) {
3730    ConstantInt *CondVal =
3731      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
3732
3733    // Couldn't symbolically evaluate.
3734    if (!CondVal) return getCouldNotCompute();
3735
3736    if (CondVal->getValue() == uint64_t(ExitWhen)) {
3737      ++NumBruteForceTripCountsComputed;
3738      return getConstant(Type::Int32Ty, IterationNum);
3739    }
3740
3741    // Compute the value of the PHI node for the next iteration.
3742    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3743    if (NextPHI == 0 || NextPHI == PHIVal)
3744      return getCouldNotCompute();// Couldn't evaluate or not making progress...
3745    PHIVal = NextPHI;
3746  }
3747
3748  // Too many iterations were needed to evaluate.
3749  return getCouldNotCompute();
3750}
3751
3752/// getSCEVAtScope - Return a SCEV expression handle for the specified value
3753/// at the specified scope in the program.  The L value specifies a loop
3754/// nest to evaluate the expression at, where null means the top-level
3755/// scope and a non-null loop means just inside that specified loop.
3756///
3757/// This method can be used to compute the exit value for a variable defined
3758/// in a loop by querying what the value will hold in the parent loop.
3759///
3760/// In the case that a relevant loop exit value cannot be computed, the
3761/// original value V is returned.
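/// For example, for i = {0,+,1} in a loop with a known backedge-taken count
/// of 9, evaluating i at the parent loop's scope yields 9, its value in the
/// final iteration.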
3762const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
3763  // FIXME: this should be turned into a virtual method on SCEV!
3764
3765  if (isa<SCEVConstant>(V)) return V;
3766
3767  // If this instruction is evolved from a constant-evolving PHI, compute the
3768  // exit value from the loop without using SCEVs.
3769  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
3770    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
3771      const Loop *LI = (*this->LI)[I->getParent()];
3772      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
3773        if (PHINode *PN = dyn_cast<PHINode>(I))
3774          if (PN->getParent() == LI->getHeader()) {
3775            // Okay, there is no closed form solution for the PHI node.  Check
3776            // to see if the loop that contains it has a known backedge-taken
3777            // count.  If so, we may be able to force computation of the exit
3778            // value.
3779            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
3780            if (const SCEVConstant *BTCC =
3781                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
3782              // Okay, we know how many times the containing loop executes.  If
3783              // this is a constant evolving PHI node, get the final value at
3784              // the specified iteration number.
3785              Constant *RV = getConstantEvolutionLoopExitValue(PN,
3786                                                   BTCC->getValue()->getValue(),
3787                                                               LI);
3788              if (RV) return getSCEV(RV);
3789            }
3790          }
3791
3792      // Okay, this is an expression that we cannot symbolically evaluate
3793      // into a SCEV.  Check to see if it's possible to symbolically evaluate
3794      // the arguments into constants, and if so, try to constant propagate the
3795      // result.  This is particularly useful for computing loop exit values.
3796      if (CanConstantFold(I)) {
3797        // Check to see if we've folded this instruction at this loop before.
3798        std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
3799        std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
3800          Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
3801        if (!Pair.second)
3802          return Pair.first->second ? getSCEV(Pair.first->second) : V;
3803
3804        std::vector<Constant*> Operands;
3805        Operands.reserve(I->getNumOperands());
3806        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3807          Value *Op = I->getOperand(i);
3808          if (Constant *C = dyn_cast<Constant>(Op)) {
3809            Operands.push_back(C);
3810          } else {
3811            // If an operand is non-constant and its type is not SCEVable
3812            // (neither integer nor pointer), don't even try to analyze it
3813            // with SCEV techniques.
3814            if (!isSCEVable(Op->getType()))
3815              return V;
3816
3817            const SCEV* OpV = getSCEVAtScope(Op, L);
3818            if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
3819              Constant *C = SC->getValue();
3820              if (C->getType() != Op->getType())
3821                C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3822                                                                  Op->getType(),
3823                                                                  false),
3824                                          C, Op->getType());
3825              Operands.push_back(C);
3826            } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
3827              if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
3828                if (C->getType() != Op->getType())
3829                  C =
3830                    ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3831                                                                  Op->getType(),
3832                                                                  false),
3833                                          C, Op->getType());
3834                Operands.push_back(C);
3835              } else
3836                return V;
3837            } else {
3838              return V;
3839            }
3840          }
3841        }
3842
3843        Constant *C;
3844        if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3845          C = ConstantFoldCompareInstOperands(CI->getPredicate(),
3846                                              &Operands[0], Operands.size(),
3847                                              Context);
3848        else
3849          C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3850                                       &Operands[0], Operands.size(), Context);
3851        Pair.first->second = C;
3852        return getSCEV(C);
3853      }
3854    }
3855
3856    // This is some other type of SCEVUnknown, just return it.
3857    return V;
3858  }
3859
3860  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
3861    // Avoid performing the look-up in the common case where the specified
3862    // expression has no loop-variant portions.
3863    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
3864      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3865      if (OpAtScope != Comm->getOperand(i)) {
3866        // Okay, at least one of these operands is loop variant but might be
3867        // foldable.  Build a new instance of the folded commutative expression.
3868        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
3869                                            Comm->op_begin()+i);
3870        NewOps.push_back(OpAtScope);
3871
3872        for (++i; i != e; ++i) {
3873          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3874          NewOps.push_back(OpAtScope);
3875        }
3876        if (isa<SCEVAddExpr>(Comm))
3877          return getAddExpr(NewOps);
3878        if (isa<SCEVMulExpr>(Comm))
3879          return getMulExpr(NewOps);
3880        if (isa<SCEVSMaxExpr>(Comm))
3881          return getSMaxExpr(NewOps);
3882        if (isa<SCEVUMaxExpr>(Comm))
3883          return getUMaxExpr(NewOps);
3884        LLVM_UNREACHABLE("Unknown commutative SCEV type!");
3885      }
3886    }
3887    // If we got here, all operands are loop invariant.
3888    return Comm;
3889  }
3890
3891  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
3892    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
3893    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
3894    if (LHS == Div->getLHS() && RHS == Div->getRHS())
3895      return Div;   // must be loop invariant
3896    return getUDivExpr(LHS, RHS);
3897  }
3898
3899  // If this is a loop recurrence for a loop that does not contain L, then we
3900  // are dealing with the final value computed by the loop.
3901  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
3902    if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
3903      // To evaluate this recurrence, we need to know how many times the AddRec
3904      // loop iterates.  Compute this now.
3905      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
3906      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
3907
3908      // Then, evaluate the AddRec.
3909      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
3910    }
3911    return AddRec;
3912  }
3913
3914  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
3915    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3916    if (Op == Cast->getOperand())
3917      return Cast;  // must be loop invariant
3918    return getZeroExtendExpr(Op, Cast->getType());
3919  }
3920
3921  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
3922    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3923    if (Op == Cast->getOperand())
3924      return Cast;  // must be loop invariant
3925    return getSignExtendExpr(Op, Cast->getType());
3926  }
3927
3928  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
3929    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3930    if (Op == Cast->getOperand())
3931      return Cast;  // must be loop invariant
3932    return getTruncateExpr(Op, Cast->getType());
3933  }
3934
3935  LLVM_UNREACHABLE("Unknown SCEV type!");
3936  return 0;
3937}
3938
3939/// getSCEVAtScope - This is a convenience function which does
3940/// getSCEVAtScope(getSCEV(V), L).
3941const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
3942  return getSCEVAtScope(getSCEV(V), L);
3943}
3944
3945/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
3946/// following equation:
3947///
3948///     A * X = B (mod N)
3949///
3950/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
3951/// A and B isn't important.
3952///
3953/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
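///
/// For example, with BW = 4, A = 4 and B = 8: D = gcd(4, 16) = 4 (Mult2 = 2),
/// and B has 3 trailing zeros, so it is divisible by D.  A/D = 1, N/D = 4,
/// and the multiplicative inverse of 1 mod 4 is 1, giving
/// X = (1 * (8 >> 2)) mod 4 = 2.  Indeed, 4 * 2 = 8 (mod 16).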
3954static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
3955                                               ScalarEvolution &SE) {
3956  uint32_t BW = A.getBitWidth();
3957  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
3958  assert(A != 0 && "A must be non-zero.");
3959
3960  // 1. D = gcd(A, N)
3961  //
3962  // The gcd of A and N may have only one prime factor: 2. The number of
3963  // trailing zeros in A is its multiplicity.
3964  uint32_t Mult2 = A.countTrailingZeros();
3965  // D = 2^Mult2
3966
3967  // 2. Check if B is divisible by D.
3968  //
3969  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
3970  // is not less than multiplicity of this prime factor for D.
3971  if (B.countTrailingZeros() < Mult2)
3972    return SE.getCouldNotCompute();
3973
3974  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
3975  // modulo (N / D).
3976  //
3977  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
3978  // bit width during computations.
3979  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
3980  APInt Mod(BW + 1, 0);
3981  Mod.set(BW - Mult2);  // Mod = N / D
3982  APInt I = AD.multiplicativeInverse(Mod);
3983
3984  // 4. Compute the minimum unsigned root of the equation:
3985  // I * (B / D) mod (N / D)
3986  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
3987
3988  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
3989  // bits.
3990  return SE.getConstant(Result.trunc(BW));
3991}
3992
3993/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
3994/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
3995/// might be the same) or two SCEVCouldNotCompute objects.
3996///
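/// For example, the chrec {-4,+,1,+,2} evaluates to x*x - 4 at iteration x,
/// so A = 1, B = 0 and C = -4 below, and the two roots are
/// (0 +/- sqrt(16)) / 2, i.e. 2 and -2.
///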
3997static std::pair<const SCEV *,const SCEV *>
3998SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
3999  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
4000  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
4001  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
4002  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
4003
4004  // We currently can only solve this if the coefficients are constants.
4005  if (!LC || !MC || !NC) {
4006    const SCEV *CNC = SE.getCouldNotCompute();
4007    return std::make_pair(CNC, CNC);
4008  }
4009
4010  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
4011  const APInt &L = LC->getValue()->getValue();
4012  const APInt &M = MC->getValue()->getValue();
4013  const APInt &N = NC->getValue()->getValue();
4014  APInt Two(BitWidth, 2);
4015  APInt Four(BitWidth, 4);
4016
4017  {
4018    using namespace APIntOps;
4019    const APInt& C = L;
4020    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
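    // (evaluating {L,+,M,+,N} at iteration x gives L + M*x + N*x*(x-1)/2,
    // which regroups to (N/2)*x^2 + (M - N/2)*x + L).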
4021    // The B coefficient is M-N/2
4022    APInt B(M);
4023    B -= sdiv(N,Two);
4024
4025    // The A coefficient is N/2
4026    APInt A(N.sdiv(Two));
4027
4028    // Compute the B^2-4ac term.
4029    APInt SqrtTerm(B);
4030    SqrtTerm *= B;
4031    SqrtTerm -= Four * (A * C);
4032
4033    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4034    // integer value or else APInt::sqrt() will assert.
4035    APInt SqrtVal(SqrtTerm.sqrt());
4036
4037    // Compute the two solutions for the quadratic formula.
4038    // The divisions must be performed as signed divisions.
4039    APInt NegB(-B);
4040    APInt TwoA( A << 1 );
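    // If 2A is zero, the recurrence is not actually quadratic and the
    // signed divisions below would divide by zero; bail out.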
4041    if (TwoA.isMinValue()) {
4042      const SCEV *CNC = SE.getCouldNotCompute();
4043      return std::make_pair(CNC, CNC);
4044    }
4045
4046    LLVMContext *Context = SE.getContext();
4047
4048    ConstantInt *Solution1 =
4049      Context->getConstantInt((NegB + SqrtVal).sdiv(TwoA));
4050    ConstantInt *Solution2 =
4051      Context->getConstantInt((NegB - SqrtVal).sdiv(TwoA));
4052
4053    return std::make_pair(SE.getConstant(Solution1),
4054                          SE.getConstant(Solution2));
4055  } // end of APIntOps scope
4056}
4057
4058/// HowFarToZero - Return the number of times a backedge comparing the specified
4059/// value to zero will execute.  If not computable, return CouldNotCompute.
4060const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
4061  // If the value is a constant
4062  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4063    // If the value is already zero, the branch will execute zero times.
4064    if (C->getValue()->isZero()) return C;
4065    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4066  }
4067
4068  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4069  if (!AddRec || AddRec->getLoop() != L)
4070    return getCouldNotCompute();
4071
4072  if (AddRec->isAffine()) {
4073    // If this is an affine expression, the execution count of this branch is
4074    // the minimum unsigned root of the following equation:
4075    //
4076    //     Start + Step*N = 0 (mod 2^BW)
4077    //
4078    // equivalent to:
4079    //
4080    //             Step*N = -Start (mod 2^BW)
4081    //
4082    // where BW is the common bit width of Start and Step.
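    //
    // For example, {10,+,-1} in 8 bits reaches zero when 10 - N = 0
    // (mod 256), i.e. after N = 10 iterations; the all-ones fast path
    // below returns Start directly in this case.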
4083
4084    // Get the initial value for the loop.
4085    const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4086                                       L->getParentLoop());
4087    const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4088                                      L->getParentLoop());
4089
4090    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4091      // For now we handle only constant steps.
4092
4093      // First, handle unitary steps.
4094      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
4095        return getNegativeSCEV(Start);       //   N = -Start (as unsigned)
4096      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
4097        return Start;                           //    N = Start (as unsigned)
4098
4099      // Then, try to solve the above equation provided that Start is constant.
4100      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4101        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4102                                            -StartC->getValue()->getValue(),
4103                                            *this);
4104    }
4105  } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
4106    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4107    // the quadratic equation to solve it.
4108    std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
4109                                                                    *this);
4110    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4111    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4112    if (R1) {
4113#if 0
4114      errs() << "HFTZ: " << *V << " - sol#1: " << *R1
4115             << "  sol#2: " << *R2 << "\n";
4116#endif
4117      // Pick the smallest positive root value.
4118      if (ConstantInt *CB =
4119          dyn_cast<ConstantInt>(Context->getConstantExprICmp(ICmpInst::ICMP_ULT,
4120                                   R1->getValue(), R2->getValue()))) {
4121        if (!CB->getZExtValue())
4122          std::swap(R1, R2);   // R1 is the minimum root now.
4123
4124        // We can only use this value if the chrec ends up with an exact zero
4125        // value at this index.  When solving for "X*X != 5", for example, we
4126        // should not accept a root of 2.
4127        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4128        if (Val->isZero())
4129          return R1;  // We found a quadratic root!
4130      }
4131    }
4132  }
4133
4134  return getCouldNotCompute();
4135}
4136
4137/// HowFarToNonZero - Return the number of times a backedge checking the
4138/// specified value for nonzero will execute.  If not computable, return
4139/// CouldNotCompute.
4140const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4141  // Loops that look like: while (X == 0) are very strange indeed.  We don't
4142  // handle them yet except for the trivial case.  This could be expanded in the
4143  // future as needed.
4144
4145  // If the value is a constant, check to see if it is known to be non-zero
4146  // already.  If so, the backedge will execute zero times.
4147  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4148    if (!C->getValue()->isNullValue())
4149      return getIntegerSCEV(0, C->getType());
4150    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4151  }
4152
4153  // We could implement others, but I really doubt anyone writes loops like
4154  // this, and if they did, they would already be constant folded.
4155  return getCouldNotCompute();
4156}
4157
4158/// getLoopPredecessor - If the given loop's header has exactly one unique
4159/// predecessor outside the loop, return it. Otherwise return null.
4160///
4161BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
4162  BasicBlock *Header = L->getHeader();
4163  BasicBlock *Pred = 0;
4164  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
4165       PI != E; ++PI)
4166    if (!L->contains(*PI)) {
4167      if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4168      Pred = *PI;
4169    }
4170  return Pred;
4171}
4172
4173/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4174/// (which may not be an immediate predecessor) which has exactly one
4175/// successor from which BB is reachable, or null if no such block is
4176/// found.
4177///
4178BasicBlock *
4179ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4180  // If the block has a unique predecessor, then there is no path from the
4181  // predecessor to the block that does not go through the direct edge
4182  // from the predecessor to the block.
4183  if (BasicBlock *Pred = BB->getSinglePredecessor())
4184    return Pred;
4185
4186  // A loop's header is defined to be a block that dominates the loop.
4187  // If the header has a unique predecessor outside the loop, it must be
4188  // a block that has exactly one successor that can reach the loop.
4189  if (Loop *L = LI->getLoopFor(BB))
4190    return getLoopPredecessor(L);
4191
4192  return 0;
4193}
4194
4195/// HasSameValue - SCEV structural equivalence is usually sufficient for
4196/// testing whether two expressions are equal, however for the purposes of
4197/// looking for a condition guarding a loop, it can be useful to be a little
4198/// more general, since a front-end may have replicated the controlling
4199/// expression.
4200///
4201static bool HasSameValue(const SCEV *A, const SCEV *B) {
4202  // Quick check to see if they are the same SCEV.
4203  if (A == B) return true;
4204
4205  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4206  // two different instructions with the same value. Check for this case.
4207  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4208    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4209      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4210        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4211          if (AI->isIdenticalTo(BI))
4212            return true;
4213
4214  // Otherwise assume they may have a different value.
4215  return false;
4216}
4217
4218bool ScalarEvolution::isKnownNegative(const SCEV *S) {
4219  return getSignedRange(S).getSignedMax().isNegative();
4220}
4221
4222bool ScalarEvolution::isKnownPositive(const SCEV *S) {
4223  return getSignedRange(S).getSignedMin().isStrictlyPositive();
4224}
4225
4226bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
4227  return !getSignedRange(S).getSignedMin().isNegative();
4228}
4229
4230bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
4231  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
4232}
4233
4234bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
4235  return isKnownNegative(S) || isKnownPositive(S);
4236}
4237
4238bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
4239                                       const SCEV *LHS, const SCEV *RHS) {
4240
4241  if (HasSameValue(LHS, RHS))
4242    return ICmpInst::isTrueWhenEqual(Pred);
4243
4244  switch (Pred) {
4245  default:
4246    assert(0 && "Unexpected ICmpInst::Predicate value!");
4247    break;
4248  case ICmpInst::ICMP_SGT:
4249    Pred = ICmpInst::ICMP_SLT;
4250    std::swap(LHS, RHS);
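    // fall through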
4251  case ICmpInst::ICMP_SLT: {
4252    ConstantRange LHSRange = getSignedRange(LHS);
4253    ConstantRange RHSRange = getSignedRange(RHS);
4254    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
4255      return true;
4256    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
4257      return false;
4258
4259    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4260    ConstantRange DiffRange = getUnsignedRange(Diff);
4261    if (isKnownNegative(Diff)) {
4262      if (DiffRange.getUnsignedMax().ult(LHSRange.getUnsignedMin()))
4263        return true;
4264      if (DiffRange.getUnsignedMin().uge(LHSRange.getUnsignedMax()))
4265        return false;
4266    } else if (isKnownPositive(Diff)) {
4267      if (LHSRange.getUnsignedMax().ult(DiffRange.getUnsignedMin()))
4268        return true;
4269      if (LHSRange.getUnsignedMin().uge(DiffRange.getUnsignedMax()))
4270        return false;
4271    }
4272    break;
4273  }
4274  case ICmpInst::ICMP_SGE:
4275    Pred = ICmpInst::ICMP_SLE;
4276    std::swap(LHS, RHS);
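    // fall through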
4277  case ICmpInst::ICMP_SLE: {
4278    ConstantRange LHSRange = getSignedRange(LHS);
4279    ConstantRange RHSRange = getSignedRange(RHS);
4280    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
4281      return true;
4282    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
4283      return false;
4284
4285    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4286    ConstantRange DiffRange = getUnsignedRange(Diff);
4287    if (isKnownNonPositive(Diff)) {
4288      if (DiffRange.getUnsignedMax().ule(LHSRange.getUnsignedMin()))
4289        return true;
4290      if (DiffRange.getUnsignedMin().ugt(LHSRange.getUnsignedMax()))
4291        return false;
4292    } else if (isKnownNonNegative(Diff)) {
4293      if (LHSRange.getUnsignedMax().ule(DiffRange.getUnsignedMin()))
4294        return true;
4295      if (LHSRange.getUnsignedMin().ugt(DiffRange.getUnsignedMax()))
4296        return false;
4297    }
4298    break;
4299  }
4300  case ICmpInst::ICMP_UGT:
4301    Pred = ICmpInst::ICMP_ULT;
4302    std::swap(LHS, RHS);
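    // fall through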
4303  case ICmpInst::ICMP_ULT: {
4304    ConstantRange LHSRange = getUnsignedRange(LHS);
4305    ConstantRange RHSRange = getUnsignedRange(RHS);
4306    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
4307      return true;
4308    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
4309      return false;
4310
4311    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4312    ConstantRange DiffRange = getUnsignedRange(Diff);
4313    if (LHSRange.getUnsignedMax().ult(DiffRange.getUnsignedMin()))
4314      return true;
4315    if (LHSRange.getUnsignedMin().uge(DiffRange.getUnsignedMax()))
4316      return false;
4317    break;
4318  }
4319  case ICmpInst::ICMP_UGE:
4320    Pred = ICmpInst::ICMP_ULE;
4321    std::swap(LHS, RHS);
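    // fall through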
4322  case ICmpInst::ICMP_ULE: {
4323    ConstantRange LHSRange = getUnsignedRange(LHS);
4324    ConstantRange RHSRange = getUnsignedRange(RHS);
4325    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
4326      return true;
4327    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
4328      return false;
4329
4330    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4331    ConstantRange DiffRange = getUnsignedRange(Diff);
4332    if (LHSRange.getUnsignedMax().ule(DiffRange.getUnsignedMin()))
4333      return true;
4334    if (LHSRange.getUnsignedMin().ugt(DiffRange.getUnsignedMax()))
4335      return false;
4336    break;
4337  }
4338  case ICmpInst::ICMP_NE: {
4339    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
4340      return true;
4341    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
4342      return true;
4343
4344    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4345    if (isKnownNonZero(Diff))
4346      return true;
4347    break;
4348  }
4349  case ICmpInst::ICMP_EQ:
4350    break;
4351  }
4352  return false;
4353}
4354
4355/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
4356/// protected by a conditional between LHS and RHS.  This is used to
4357/// eliminate casts.
4358bool
4359ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
4360                                             ICmpInst::Predicate Pred,
4361                                             const SCEV *LHS, const SCEV *RHS) {
4362  // Interpret a null as meaning no loop, where there is obviously no guard
4363  // (interprocedural conditions notwithstanding).
4364  if (!L) return true;
4365
4366  BasicBlock *Latch = L->getLoopLatch();
4367  if (!Latch)
4368    return false;
4369
4370  BranchInst *LoopContinuePredicate =
4371    dyn_cast<BranchInst>(Latch->getTerminator());
4372  if (!LoopContinuePredicate ||
4373      LoopContinuePredicate->isUnconditional())
4374    return false;
4375
4376  return
4377    isNecessaryCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
4378                    LoopContinuePredicate->getSuccessor(0) != L->getHeader());
4379}
4380
4381/// isLoopGuardedByCond - Test whether entry to the loop is protected
4382/// by a conditional between LHS and RHS.  This is used to help avoid max
4383/// expressions in loop trip counts, and to eliminate casts.
4384bool
4385ScalarEvolution::isLoopGuardedByCond(const Loop *L,
4386                                     ICmpInst::Predicate Pred,
4387                                     const SCEV *LHS, const SCEV *RHS) {
4388  // Interpret a null as meaning no loop, where there is obviously no guard
4389  // (interprocedural conditions notwithstanding).
4390  if (!L) return false;
4391
4392  BasicBlock *Predecessor = getLoopPredecessor(L);
4393  BasicBlock *PredecessorDest = L->getHeader();
4394
4395  // Starting at the loop predecessor, climb up the predecessor chain, as long
4396  // as there are predecessors that can be found that have unique successors
4397  // leading to the original header.
4398  for (; Predecessor;
4399       PredecessorDest = Predecessor,
4400       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
4401
4402    BranchInst *LoopEntryPredicate =
4403      dyn_cast<BranchInst>(Predecessor->getTerminator());
4404    if (!LoopEntryPredicate ||
4405        LoopEntryPredicate->isUnconditional())
4406      continue;
4407
4408    if (isNecessaryCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
4409                        LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
4410      return true;
4411  }
4412
4413  return false;
4414}
4415
4416/// isNecessaryCond - Test whether the condition described by Pred, LHS,
4417/// and RHS is a necessary condition for the given Cond value to evaluate
4418/// to true.
4419bool ScalarEvolution::isNecessaryCond(Value *CondValue,
4420                                      ICmpInst::Predicate Pred,
4421                                      const SCEV *LHS, const SCEV *RHS,
4422                                      bool Inverse) {
4423  // Recursively handle And and Or conditions.
4424  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
4425    if (BO->getOpcode() == Instruction::And) {
4426      if (!Inverse)
4427        return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4428               isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4429    } else if (BO->getOpcode() == Instruction::Or) {
4430      if (Inverse)
4431        return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4432               isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4433    }
4434  }
4435
4436  ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
4437  if (!ICI) return false;
4438
4439  // Now that we've found a conditional branch that dominates the loop, check
4440  // to see if it is the comparison we are looking for.
4441  Value *PreCondLHS = ICI->getOperand(0);
4442  Value *PreCondRHS = ICI->getOperand(1);
4443  ICmpInst::Predicate FoundPred;
4444  if (Inverse)
4445    FoundPred = ICI->getInversePredicate();
4446  else
4447    FoundPred = ICI->getPredicate();
4448
4449  if (FoundPred == Pred)
4450    ; // An exact match.
4451  else if (!ICmpInst::isTrueWhenEqual(FoundPred) && Pred == ICmpInst::ICMP_NE) {
4452    // The actual condition is stronger than necessary; it implies NE.
4453    FoundPred = ICmpInst::ICMP_NE;
4454    // NE is symmetric but the original comparison may not be. Swap
4455    // the operands if necessary so that they match below.
4456    if (isa<SCEVConstant>(LHS))
4457      std::swap(PreCondLHS, PreCondRHS);
4458  } else
4459    // Check a few special cases.
4460    switch (FoundPred) {
4461    case ICmpInst::ICMP_UGT:
4462      if (Pred == ICmpInst::ICMP_ULT) {
4463        std::swap(PreCondLHS, PreCondRHS);
4464        FoundPred = ICmpInst::ICMP_ULT;
4465        break;
4466      }
4467      return false;
4468    case ICmpInst::ICMP_SGT:
4469      if (Pred == ICmpInst::ICMP_SLT) {
4470        std::swap(PreCondLHS, PreCondRHS);
4471        FoundPred = ICmpInst::ICMP_SLT;
4472        break;
4473      }
4474      return false;
4475    case ICmpInst::ICMP_NE:
4476      // Expressions like (x >u 0) are often canonicalized to (x != 0),
4477      // so check for this case by checking if the NE is comparing against
4478      // a minimum or maximum constant.
4479      if (!ICmpInst::isTrueWhenEqual(Pred))
4480        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(RHS)) {
4481          const APInt &A = C->getValue()->getValue();
4482          switch (Pred) {
4483          case ICmpInst::ICMP_SLT:
4484            if (A.isMaxSignedValue()) break;
4485            return false;
4486          case ICmpInst::ICMP_SGT:
4487            if (A.isMinSignedValue()) break;
4488            return false;
4489          case ICmpInst::ICMP_ULT:
4490            if (A.isMaxValue()) break;
4491            return false;
4492          case ICmpInst::ICMP_UGT:
4493            if (A.isMinValue()) break;
4494            return false;
4495          default:
4496            return false;
4497          }
4498          FoundPred = Pred;
4499          // NE is symmetric but the original comparison may not be. Swap
4500          // the operands if necessary so that they match below.
4501          if (isa<SCEVConstant>(LHS))
4502            std::swap(PreCondLHS, PreCondRHS);
4503          break;
4504        }
4505      return false;
4506    default:
4507      // We weren't able to reconcile the condition.
4508      return false;
4509    }
4510
4511  assert(Pred == FoundPred && "Conditions were not reconciled!");
4512
4513  // Bail if the ICmp's operands' types are wider than the needed type
4514  // before attempting to call getSCEV on them. This avoids infinite
4515  // recursion, since the analysis of widening casts can require loop
4516  // exit condition information for overflow checking, which would
4517  // lead back here.
4518  if (getTypeSizeInBits(LHS->getType()) <
4519      getTypeSizeInBits(PreCondLHS->getType()))
4520    return false;
4521
4522  const SCEV *FoundLHS = getSCEV(PreCondLHS);
4523  const SCEV *FoundRHS = getSCEV(PreCondRHS);
4524
4525  // Balance the types. The case where FoundLHS' type is wider than
4526  // LHS' type is checked for above.
4527  if (getTypeSizeInBits(LHS->getType()) >
4528      getTypeSizeInBits(FoundLHS->getType())) {
4529    if (CmpInst::isSigned(Pred)) {
4530      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
4531      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
4532    } else {
4533      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
4534      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
4535    }
4536  }
4537
4538  return isNecessaryCondOperands(Pred, LHS, RHS,
4539                                 FoundLHS, FoundRHS) ||
4540         // ~x < ~y --> x > y
4541         isNecessaryCondOperands(Pred, LHS, RHS,
4542                                 getNotSCEV(FoundRHS), getNotSCEV(FoundLHS));
4543}
4544
4545/// isNecessaryCondOperands - Test whether the condition described by Pred,
4546/// LHS, and RHS is a necessary condition for the condition described by
4547/// Pred, FoundLHS, and FoundRHS to evaluate to true.
4548bool
4549ScalarEvolution::isNecessaryCondOperands(ICmpInst::Predicate Pred,
4550                                         const SCEV *LHS, const SCEV *RHS,
4551                                         const SCEV *FoundLHS,
4552                                         const SCEV *FoundRHS) {
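  // In each case below, LHS pred RHS follows from FoundLHS pred FoundRHS
  // because LHS is known to be no larger than FoundLHS and RHS no smaller
  // than FoundRHS (or the reverse, for the greater-than predicates).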
4553  switch (Pred) {
4554  default: break;
4555  case ICmpInst::ICMP_SLT:
4556    if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
4557        isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
4558      return true;
4559    break;
4560  case ICmpInst::ICMP_SGT:
4561    if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
4562        isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
4563      return true;
4564    break;
4565  case ICmpInst::ICMP_ULT:
4566    if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
4567        isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
4568      return true;
4569    break;
4570  case ICmpInst::ICMP_UGT:
4571    if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
4572        isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
4573      return true;
4574    break;
4575  }
4576
4577  return false;
4578}
4579
4580/// getBECount - Subtract the end and start values and divide by the step,
4581/// rounding up, to get the number of times the backedge is executed. Return
4582/// CouldNotCompute if an intermediate computation overflows.
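///
/// For example, Start = 0, End = 10 and Step = 3 give
/// (10 - 0 + (3 - 1)) / 3 = 4; the added Step-1 makes the division round up.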
4583const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
4584                                        const SCEV *End,
4585                                        const SCEV *Step) {
4586  const Type *Ty = Start->getType();
4587  const SCEV *NegOne = getIntegerSCEV(-1, Ty);
4588  const SCEV *Diff = getMinusSCEV(End, Start);
4589  const SCEV *RoundUp = getAddExpr(Step, NegOne);
4590
4591  // Add an adjustment to the difference between End and Start so that
4592  // the division will effectively round up.
4593  const SCEV *Add = getAddExpr(Diff, RoundUp);
4594
4595  // Check Add for unsigned overflow.
4596  // TODO: More sophisticated things could be done here.
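  // The check recomputes the sum in a type one bit wider: if the BW+1-bit
  // sum of the zero-extended operands differs from the zero-extended BW-bit
  // sum, the original addition wrapped.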
4597  const Type *WideTy = Context->getIntegerType(getTypeSizeInBits(Ty) + 1);
4598  const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
4599  const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
4600  const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
4601  if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
4602    return getCouldNotCompute();
4603
4604  return getUDivExpr(Add, Step);
4605}
4606
4607/// HowManyLessThans - Return the number of times a backedge containing the
4608/// specified less-than comparison will execute.  If not computable, return
4609/// CouldNotCompute.
4610ScalarEvolution::BackedgeTakenInfo
4611ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
4612                                  const Loop *L, bool isSigned) {
4613  // Only handle:  "ADDREC < LoopInvariant".
4614  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
4615
4616  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
4617  if (!AddRec || AddRec->getLoop() != L)
4618    return getCouldNotCompute();
4619
4620  if (AddRec->isAffine()) {
4621    // FORNOW: We only support constant, positive strides.
4622    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
4623    const SCEV *Step = AddRec->getStepRecurrence(*this);
4624
4625    // TODO: handle non-constant strides.
4626    const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
4627    if (!CStep || CStep->isZero())
4628      return getCouldNotCompute();
4629    if (CStep->isOne()) {
4630      // With unit stride, the iteration never steps past the limit value.
4631    } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
4632      if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
4633        // Test whether a positive iteration can step past the limit
4634        // value and past the maximum value for its type in a single step.
4635        if (isSigned) {
4636          APInt Max = APInt::getSignedMaxValue(BitWidth);
4637          if ((Max - CStep->getValue()->getValue())
4638                .slt(CLimit->getValue()->getValue()))
4639            return getCouldNotCompute();
4640        } else {
4641          APInt Max = APInt::getMaxValue(BitWidth);
4642          if ((Max - CStep->getValue()->getValue())
4643                .ult(CLimit->getValue()->getValue()))
4644            return getCouldNotCompute();
4645        }
4646      } else
4647        // TODO: handle non-constant limit values below.
4648        return getCouldNotCompute();
4649    } else
4650      // TODO: handle negative strides below.
4651      return getCouldNotCompute();
4652
4653    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
4654    // m.  So, we count the number of iterations in which {n,+,s} < m is true.
4655    // Note that we cannot simply return max(m-n,0)/s because it's not safe
4656    // to treat m-n as either signed or unsigned, due to possible overflow.
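    //
    // For instance, in 8 bits {250,+,1} <u 10 never enters the loop, yet
    // m-n = 10-250 wraps to 16; clamping End to max(m,n) = 250 below yields
    // the correct count of 0.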
4657
4658    // First, we get the value of the LHS in the first iteration: n
4659    const SCEV *Start = AddRec->getOperand(0);
4660
4661    // Determine the minimum constant start value.
4662    const SCEV *MinStart = getConstant(isSigned ?
4663      getSignedRange(Start).getSignedMin() :
4664      getUnsignedRange(Start).getUnsignedMin());
4665
4666    // If we know that the condition is true in order to enter the loop,
4667    // then we know that it will run exactly (m-n)/s times. Otherwise, we
4668    // only know that it will execute (max(m,n)-n)/s times. In both cases,
4669    // the division must round up.
4670    const SCEV *End = RHS;
4671    if (!isLoopGuardedByCond(L,
4672                             isSigned ? ICmpInst::ICMP_SLT :
4673                                        ICmpInst::ICMP_ULT,
4674                             getMinusSCEV(Start, Step), RHS))
4675      End = isSigned ? getSMaxExpr(RHS, Start)
4676                     : getUMaxExpr(RHS, Start);
4677
4678    // Determine the maximum constant end value.
4679    const SCEV *MaxEnd = getConstant(isSigned ?
4680      getSignedRange(End).getSignedMax() :
4681      getUnsignedRange(End).getUnsignedMax());
4682
4683    // Finally, we subtract these two values and divide, rounding up, to get
4684    // the number of times the backedge is executed.
4685    const SCEV *BECount = getBECount(Start, End, Step);
4686
4687    // The maximum backedge count is similar, except using the minimum start
4688    // value and the maximum end value.
4689    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step);
4690
4691    return BackedgeTakenInfo(BECount, MaxBECount);
4692  }
4693
4694  return getCouldNotCompute();
4695}
4696
4697/// getNumIterationsInRange - Return the number of iterations of this loop that
4698/// produce values in the specified constant range.  Another way of looking at
4699/// this is that it returns the first iteration number where the value is not
4700/// in the range, thus computing the exit count.  If the iteration count can't
4701/// be computed, an instance of SCEVCouldNotCompute is returned.
4702const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
4703                                                    ScalarEvolution &SE) const {
4704  if (Range.isFullSet())  // Infinite loop.
4705    return SE.getCouldNotCompute();
4706
4707  // If the start is a non-zero constant, shift the range to simplify things.
4708  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
4709    if (!SC->getValue()->isZero()) {
4710      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
4711      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
4712      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
4713      if (const SCEVAddRecExpr *ShiftedAddRec =
4714            dyn_cast<SCEVAddRecExpr>(Shifted))
4715        return ShiftedAddRec->getNumIterationsInRange(
4716                           Range.subtract(SC->getValue()->getValue()), SE);
4717      // This is strange and shouldn't happen.
4718      return SE.getCouldNotCompute();
4719    }
4720
4721  // The only time we can solve this is when we have all constant indices.
4722  // Otherwise, we cannot determine the overflow conditions.
4723  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
4724    if (!isa<SCEVConstant>(getOperand(i)))
4725      return SE.getCouldNotCompute();
4726
4728  // Okay at this point we know that all elements of the chrec are constants and
4729  // that the start element is zero.
4730
4731  // First check to see if the range contains zero.  If not, the first
4732  // iteration exits.
4733  unsigned BitWidth = SE.getTypeSizeInBits(getType());
4734  if (!Range.contains(APInt(BitWidth, 0)))
4735    return SE.getIntegerSCEV(0, getType());
4736
4737  if (isAffine()) {
4738    // If this is an affine expression then we have this situation:
4739    //   Solve {0,+,A} in Range  ===  Ax in Range
4740
4741    // We know that zero is in the range.  If A is positive then we know that
4742    // the upper value of the range must be the first possible exit value.
4743    // If A is negative then the lower of the range is the last possible loop
4744    // value.  Also note that we already checked for a full range.
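    //
    // For example, solving {0,+,3} in the range [0,10) gives End = 9 and an
    // exit value of (9+3)/3 = 4: iteration 3 produces 9, which is still in
    // the range, while iteration 4 produces 12, which is not.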
4745    APInt One(BitWidth,1);
4746    APInt A     = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
4747    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
4748
4749    // The exit value should be (End+A)/A.
4750    APInt ExitVal = (End + A).udiv(A);
4751    ConstantInt *ExitValue = SE.getContext()->getConstantInt(ExitVal);
4752
4753    // Evaluate at the exit value.  If we really did fall out of the valid
4754    // range, then we computed our trip count, otherwise wrap around or other
4755    // things must have happened.
4756    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
4757    if (Range.contains(Val->getValue()))
4758      return SE.getCouldNotCompute();  // Something strange happened
4759
4760    // Ensure that the previous value is in the range.  This is a sanity check.
4761    assert(Range.contains(
4762           EvaluateConstantChrecAtConstant(this,
4763           SE.getContext()->getConstantInt(ExitVal - One), SE)->getValue()) &&
4764           "Linear scev computation is off in a bad way!");
4765    return SE.getConstant(ExitValue);
4766  } else if (isQuadratic()) {
4767    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
4768    // quadratic equation to solve it.  To do this, we must frame our problem in
4769    // terms of figuring out when zero is crossed, instead of when
4770    // Range.getUpper() is crossed.
4771    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
4772    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
4773    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
4774
4775    // Next, solve the constructed addrec
4776    std::pair<const SCEV *,const SCEV *> Roots =
4777      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
4778    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4779    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4780    if (R1) {
4781      // Pick the smallest positive root value.
4782      if (ConstantInt *CB =
4783          dyn_cast<ConstantInt>(
4784                       SE.getContext()->getConstantExprICmp(ICmpInst::ICMP_ULT,
4785                         R1->getValue(), R2->getValue()))) {
4786        if (!CB->getZExtValue())
4787          std::swap(R1, R2);   // R1 is the minimum root now.
4788
4789        // Make sure the root is not off by one.  The returned iteration should
4790        // not be in the range, but the previous one should be.  When solving
4791        // for "X*X < 5", for example, we should not return a root of 2.
4792        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
4793                                                             R1->getValue(),
4794                                                             SE);
4795        if (Range.contains(R1Val->getValue())) {
4796          // The next iteration must be out of the range...
4797          ConstantInt *NextVal =
4798                 SE.getContext()->getConstantInt(R1->getValue()->getValue()+1);
4799
4800          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4801          if (!Range.contains(R1Val->getValue()))
4802            return SE.getConstant(NextVal);
4803          return SE.getCouldNotCompute();  // Something strange happened
4804        }
4805
4806        // If R1 was not in the range, then it is a good return value.  Make
4807        // sure that R1-1 WAS in the range though, just in case.
4808        ConstantInt *NextVal =
4809                 SE.getContext()->getConstantInt(R1->getValue()->getValue()-1);
4810        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4811        if (Range.contains(R1Val->getValue()))
4812          return R1;
4813        return SE.getCouldNotCompute();  // Something strange happened
4814      }
4815    }
4816  }
4817
4818  return SE.getCouldNotCompute();
4819}
4820
4821
4822
4823//===----------------------------------------------------------------------===//
4824//                   SCEVCallbackVH Class Implementation
4825//===----------------------------------------------------------------------===//
4826
4827void ScalarEvolution::SCEVCallbackVH::deleted() {
4828  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4829  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
4830    SE->ConstantEvolutionLoopExitValue.erase(PN);
4831  if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
4832    SE->ValuesAtScopes.erase(I);
4833  SE->Scalars.erase(getValPtr());
4834  // this now dangles!
4835}
4836
4837void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
4838  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4839
4840  // Forget all the expressions associated with users of the old value,
4841  // so that future queries will recompute the expressions using the new
4842  // value.
4843  SmallVector<User *, 16> Worklist;
4844  Value *Old = getValPtr();
4845  bool DeleteOld = false;
4846  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
4847       UI != UE; ++UI)
4848    Worklist.push_back(*UI);
4849  while (!Worklist.empty()) {
4850    User *U = Worklist.pop_back_val();
4851    // Deleting the Old value will cause this to dangle. Postpone
4852    // that until everything else is done.
4853    if (U == Old) {
4854      DeleteOld = true;
4855      continue;
4856    }
4857    if (PHINode *PN = dyn_cast<PHINode>(U))
4858      SE->ConstantEvolutionLoopExitValue.erase(PN);
4859    if (Instruction *I = dyn_cast<Instruction>(U))
4860      SE->ValuesAtScopes.erase(I);
4861    if (SE->Scalars.erase(U))
4862      for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
4863           UI != UE; ++UI)
4864        Worklist.push_back(*UI);
4865  }
4866  if (DeleteOld) {
4867    if (PHINode *PN = dyn_cast<PHINode>(Old))
4868      SE->ConstantEvolutionLoopExitValue.erase(PN);
4869    if (Instruction *I = dyn_cast<Instruction>(Old))
4870      SE->ValuesAtScopes.erase(I);
4871    SE->Scalars.erase(Old);
4872    // this now dangles!
4873  }
4874  // this may dangle!
4875}
4876
4877ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
4878  : CallbackVH(V), SE(se) {}
4879
4880//===----------------------------------------------------------------------===//
4881//                   ScalarEvolution Class Implementation
4882//===----------------------------------------------------------------------===//
4883
4884ScalarEvolution::ScalarEvolution()
4885  : FunctionPass(&ID) {
4886}
4887
4888bool ScalarEvolution::runOnFunction(Function &F) {
4889  this->F = &F;
4890  LI = &getAnalysis<LoopInfo>();
4891  TD = getAnalysisIfAvailable<TargetData>();
4892  return false;
4893}
4894
4895void ScalarEvolution::releaseMemory() {
4896  Scalars.clear();
4897  BackedgeTakenCounts.clear();
4898  ConstantEvolutionLoopExitValue.clear();
4899  ValuesAtScopes.clear();
4900  UniqueSCEVs.clear();
4901  SCEVAllocator.Reset();
4902}
4903
4904void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
4905  AU.setPreservesAll();
4906  AU.addRequiredTransitive<LoopInfo>();
4907}
4908
4909bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
4910  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
4911}
4912
4913static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
4914                          const Loop *L) {
4915  // Print all inner loops first
4916  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4917    PrintLoopInfo(OS, SE, *I);
4918
4919  OS << "Loop " << L->getHeader()->getName() << ": ";
4920
4921  SmallVector<BasicBlock*, 8> ExitBlocks;
4922  L->getExitBlocks(ExitBlocks);
4923  if (ExitBlocks.size() != 1)
4924    OS << "<multiple exits> ";
4925
4926  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
4927    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
4928  } else {
4929    OS << "Unpredictable backedge-taken count. ";
4930  }
4931
4932  OS << "\n";
4933  OS << "Loop " << L->getHeader()->getName() << ": ";
4934
4935  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
4936    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
4937  } else {
4938    OS << "Unpredictable max backedge-taken count. ";
4939  }
4940
4941  OS << "\n";
4942}
4943
4944void ScalarEvolution::print(raw_ostream &OS, const Module* ) const {
4945  // ScalarEvolution's implementation of the print method is to print
4946  // out SCEV values of all instructions that are interesting. Doing
4947  // this potentially causes it to create new SCEV objects though,
4948  // which technically conflicts with the const qualifier. This isn't
4949  // observable from outside the class though, so casting away the
4950  // const isn't dangerous.
4951  ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);
4952
4953  OS << "Classifying expressions for: " << F->getName() << "\n";
4954  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
4955    if (isSCEVable(I->getType())) {
4956      OS << *I;
4957      OS << "  -->  ";
4958      const SCEV *SV = SE.getSCEV(&*I);
4959      SV->print(OS);
4960
4961      const Loop *L = LI->getLoopFor((*I).getParent());
4962
4963      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
4964      if (AtUse != SV) {
4965        OS << "  -->  ";
4966        AtUse->print(OS);
4967      }
4968
4969      if (L) {
4970        OS << "\t\t" "Exits: ";
4971        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
4972        if (!ExitValue->isLoopInvariant(L)) {
4973          OS << "<<Unknown>>";
4974        } else {
4975          OS << *ExitValue;
4976        }
4977      }
4978
4979      OS << "\n";
4980    }
4981
4982  OS << "Determining loop execution counts for: " << F->getName() << "\n";
4983  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
4984    PrintLoopInfo(OS, &SE, *I);
4985}
4986
4987void ScalarEvolution::print(std::ostream &o, const Module *M) const {
4988  raw_os_ostream OS(o);
4989  print(OS, M);
4990}
4991