ScalarEvolution.cpp revision ca178908c8dc2303a1fb54a8a93bab0f0b964e11
//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These classes are uniqued in a FoldingSet and accessed via
// const SCEV * pointers.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
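// (For example, requesting the expression n + 1 twice yields the same
// const SCEV * both times, so clients can compare pointers directly
// instead of comparing structurally.)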
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
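// (For example, the canonical induction variable of a loop, a PHI that
// starts at 0 and is incremented by 1 on each iteration, is represented
// as the recurrence {0,+,1}<loop> rather than as a cyclic expression.)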
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
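// (For example, the getAddExpr folder below folds constant operands
// together, strips an added constant zero, and rewrites X + Y + Y as
// X + Y*2, so equivalent expressions converge to one canonical form.)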
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeID(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const SCEV *
SCEVCouldNotCompute::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
  return this;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
  new (S) SCEVConstant(ID, V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(Context->getConstantInt(Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(
    Context->getConstantInt(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

const SCEV *
SCEVCommutativeExpr::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      if (isa<SCEVAddExpr>(this))
        return SE.getAddExpr(NewOps);
      else if (isa<SCEVMulExpr>(this))
        return SE.getMulExpr(NewOps);
      else if (isa<SCEVSMaxExpr>(this))
        return SE.getSMaxExpr(NewOps);
      else if (isa<SCEVUMaxExpr>(this))
        return SE.getUMaxExpr(NewOps);
      else
        llvm_unreachable("Unknown commutative expr!");
    }
  }
  return this;
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

const SCEV *
SCEVAddRecExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                  const SCEV *Conc,
                                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      return SE.getAddRecExpr(NewOps, L);
    }
  }
  return this;
}


bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L->getHeader()))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() + ">";
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      llvm_unreachable("Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
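///
/// (As a worked example, an operand list {x, 2, x, 3} should come back
/// as {2, 3, x, x}: constants sort before unknowns, and the duplicate
/// x's end up adjacent where the folders can find them.)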
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                      ScalarEvolution &SE,
                                      const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
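  // (For example, in 3-bit arithmetic the inverse of 3 mod 8 is 3, since
  // 3*3 = 9 = 1 (mod 8); an exact quotient such as 6/3 can therefore be
  // computed as 6*3 = 18 = 2 (mod 8).)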
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula uses less than W + K bits. Also, the first formula
  // requires
  // a division step, whereas this formula only requires multiplies and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
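  //
  // (As a worked example of the scheme above, take K = 4 and W = 32:
  // K! = 24 = 2^3 * 3, so T = 3 and K!/2^T = 3.  The product
  // It*(It-1)*(It-2)*(It-3) is computed at W+T = 35 bits, divided by
  // 2^T = 8, truncated back to 32 bits, and then multiplied by the
  // multiplicative inverse of 3 mod 2^32.)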

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
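///
/// For example, {0,+,1,+,1} evaluates at iteration It to
/// 0 + 1*BC(It, 1) + 1*BC(It, 2) = It + It*(It-1)/2 = It*(It+1)/2.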
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                               ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);
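  // (e.g., trunc(sext i16 x to i64) to i32 becomes sext i16 x to i32.)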

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
  new (S) SCEVTruncateExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
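  // Here the i8 addrec {0,+,1} cannot wrap, since the loop runs only 100
  // times, so it is safe to analyze X as the i32 addrec {0,+,1} with the
  // zero extension pushed onto the (constant) start and step operands.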
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
  new (S) SCEVZeroExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          const SCEV *UMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          Add = getAddExpr(Start, UMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                      getSignedRange(Step).getSignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
  new (S) SCEVSignExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, and update the
/// given map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV *> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV *Two = getIntegerSCEV(2, Ty);
      const SCEV *Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
1288  // any operands we just acquired.
1289    if (DeletedAdd)
1290      return getAddExpr(Ops);
1291  }
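  // For example, if Ops was {a, (b + c)}, the loop above splices in b and c,
  // so the recursive call sees the flattened list {a, b, c}.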
1292
1293  // Skip over anything else that sorts before a multiply.
1294  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1295    ++Idx;
1296
1297  // Check to see if there are any folding opportunities present with
1298  // operands multiplied by constant values.
1299  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1300    uint64_t BitWidth = getTypeSizeInBits(Ty);
1301    DenseMap<const SCEV *, APInt> M;
1302    SmallVector<const SCEV *, 8> NewOps;
1303    APInt AccumulatedConstant(BitWidth, 0);
1304    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1305                                     Ops, APInt(BitWidth, 1), *this)) {
1306      // Some interesting folding opportunity is present, so it's worthwhile to
1307      // re-generate the operands list. Group the operands by constant scale,
1308      // to avoid multiplying by the same constant scale multiple times.
1309      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1310      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
1311           E = NewOps.end(); I != E; ++I)
1312        MulOpLists[M.find(*I)->second].push_back(*I);
1313      // Re-generate the operands list.
1314      Ops.clear();
1315      if (AccumulatedConstant != 0)
1316        Ops.push_back(getConstant(AccumulatedConstant));
1317      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1318           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1319        if (I->first != 0)
1320          Ops.push_back(getMulExpr(getConstant(I->first),
1321                                   getAddExpr(I->second)));
1322      if (Ops.empty())
1323        return getIntegerSCEV(0, Ty);
1324      if (Ops.size() == 1)
1325        return Ops[0];
1326      return getAddExpr(Ops);
1327    }
1328  }
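  // For instance, 4*a + 4*b is collected as {a, b} under the scale 4 and
  // re-emitted as 4*(a + b), performing the constant multiply only once.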
1329
1330  // If we are adding something to a multiply expression, make sure the
1331  // something is not already an operand of the multiply.  If so, merge it into
1332  // the multiply.
1333  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1334    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1335    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1336      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1337      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1338        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
1339          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
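          // Note that (MulOp == 0) is 1 when MulOp is 0 and 0 otherwise, so
          // in the common two-operand case it selects the mul operand other
          // than MulOpSCEV; wider multiplies are handled just below.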
1340          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1341          if (Mul->getNumOperands() != 2) {
1342            // If the multiply has more than two operands, we must get the
1343            // Y*Z term.
1344            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
1345            MulOps.erase(MulOps.begin()+MulOp);
1346            InnerMul = getMulExpr(MulOps);
1347          }
1348          const SCEV *One = getIntegerSCEV(1, Ty);
1349          const SCEV *AddOne = getAddExpr(InnerMul, One);
1350          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
1351          if (Ops.size() == 2) return OuterMul;
1352          if (AddOp < Idx) {
1353            Ops.erase(Ops.begin()+AddOp);
1354            Ops.erase(Ops.begin()+Idx-1);
1355          } else {
1356            Ops.erase(Ops.begin()+Idx);
1357            Ops.erase(Ops.begin()+AddOp-1);
1358          }
1359          Ops.push_back(OuterMul);
1360          return getAddExpr(Ops);
1361        }
1362
1363      // Check this multiply against other multiplies being added together.
1364      for (unsigned OtherMulIdx = Idx+1;
1365           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1366           ++OtherMulIdx) {
1367        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1368        // If MulOp occurs in OtherMul, we can fold the two multiplies
1369        // together.
1370        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1371             OMulOp != e; ++OMulOp)
1372          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1373            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1374            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1375            if (Mul->getNumOperands() != 2) {
1376              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1377                                                  Mul->op_end());
1378              MulOps.erase(MulOps.begin()+MulOp);
1379              InnerMul1 = getMulExpr(MulOps);
1380            }
1381            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1382            if (OtherMul->getNumOperands() != 2) {
1383              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1384                                                  OtherMul->op_end());
1385              MulOps.erase(MulOps.begin()+OMulOp);
1386              InnerMul2 = getMulExpr(MulOps);
1387            }
1388            const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1389            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1390            if (Ops.size() == 2) return OuterMul;
1391            Ops.erase(Ops.begin()+Idx);
1392            Ops.erase(Ops.begin()+OtherMulIdx-1);
1393            Ops.push_back(OuterMul);
1394            return getAddExpr(Ops);
1395          }
1396      }
1397    }
1398  }
1399
1400  // If there are any add recurrences in the operands list, see if any other
1401  // added values are loop invariant.  If so, we can fold them into the
1402  // recurrence.
1403  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1404    ++Idx;
1405
1406  // Scan over all recurrences, trying to fold loop invariants into them.
1407  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1408    // Scan all of the other operands to this add and add them to the vector if
1409    // they are loop invariant w.r.t. the recurrence.
1410    SmallVector<const SCEV *, 8> LIOps;
1411    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1412    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1413      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1414        LIOps.push_back(Ops[i]);
1415        Ops.erase(Ops.begin()+i);
1416        --i; --e;
1417      }
1418
1419    // If we found some loop invariants, fold them into the recurrence.
1420    if (!LIOps.empty()) {
1421      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
1422      LIOps.push_back(AddRec->getStart());
1423
1424      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1425                                             AddRec->op_end());
1426      AddRecOps[0] = getAddExpr(LIOps);
1427
1428      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
1429      // If all of the other operands were loop invariant, we are done.
1430      if (Ops.size() == 1) return NewRec;
1431
1432      // Otherwise, add the folded AddRec to the non-loop-invariant parts.
1433      for (unsigned i = 0;; ++i)
1434        if (Ops[i] == AddRec) {
1435          Ops[i] = NewRec;
1436          break;
1437        }
1438      return getAddExpr(Ops);
1439    }
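    // For example, x + {0,+,1}<L> with x invariant in L folds to {x,+,1}<L>
    // here, absorbing the invariant addend into the recurrence start.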
1440
1441    // Okay, if there weren't any loop invariants to be folded, check to see if
1442    // there are multiple AddRec's with the same loop induction variable being
1443    // added together.  If so, we can fold them.
1444    for (unsigned OtherIdx = Idx+1;
1445         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1446      if (OtherIdx != Idx) {
1447        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1448        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1449          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
1450          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
1451                                              AddRec->op_end());
1452          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
1453            if (i >= NewOps.size()) {
1454              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
1455                            OtherAddRec->op_end());
1456              break;
1457            }
1458            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
1459          }
1460          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
1461
1462          if (Ops.size() == 2) return NewAddRec;
1463
1464          Ops.erase(Ops.begin()+Idx);
1465          Ops.erase(Ops.begin()+OtherIdx-1);
1466          Ops.push_back(NewAddRec);
1467          return getAddExpr(Ops);
1468        }
1469      }
1470
1471    // Otherwise couldn't fold anything into this recurrence.  Move on to the
1472    // next one.
1473  }
1474
1475  // Okay, it looks like we really DO need an add expr.  Check to see if we
1476  // already have one, otherwise create a new one.
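  // Because SCEVs are uniqued, profiling the expression kind, the operand
  // count, and the operand pointers is enough to identify a structurally
  // identical node.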
1477  FoldingSetNodeID ID;
1478  ID.AddInteger(scAddExpr);
1479  ID.AddInteger(Ops.size());
1480  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1481    ID.AddPointer(Ops[i]);
1482  void *IP = 0;
1483  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1484  SCEV *S = SCEVAllocator.Allocate<SCEVAddExpr>();
1485  new (S) SCEVAddExpr(ID, Ops);
1486  UniqueSCEVs.InsertNode(S, IP);
1487  return S;
1488}
1489
1490
1491/// getMulExpr - Get a canonical multiply expression, or something simpler if
1492/// possible.
1493const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) {
1494  assert(!Ops.empty() && "Cannot get empty mul!");
1495#ifndef NDEBUG
1496  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1497    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1498           getEffectiveSCEVType(Ops[0]->getType()) &&
1499           "SCEVMulExpr operand types don't match!");
1500#endif
1501
1502  // Sort by complexity; this groups all similar expression types together.
1503  GroupByComplexity(Ops, LI);
1504
1505  // If there are any constants, fold them together.
1506  unsigned Idx = 0;
1507  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1508
1509    // C1*(C2+V) -> C1*C2 + C1*V
1510    if (Ops.size() == 2)
1511      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1512        if (Add->getNumOperands() == 2 &&
1513            isa<SCEVConstant>(Add->getOperand(0)))
1514          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1515                            getMulExpr(LHSC, Add->getOperand(1)));
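    // For example, 2*(3 + x) is rewritten as 6 + 2*x, exposing the constant
    // addend to further add folding.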
1516
1517
1518    ++Idx;
1519    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1520      // We found two constants, fold them together!
1521      ConstantInt *Fold = Context->getConstantInt(LHSC->getValue()->getValue() *
1522                                           RHSC->getValue()->getValue());
1523      Ops[0] = getConstant(Fold);
1524      Ops.erase(Ops.begin()+1);  // Erase the folded element
1525      if (Ops.size() == 1) return Ops[0];
1526      LHSC = cast<SCEVConstant>(Ops[0]);
1527    }
1528
1529    // If we are left with a constant one being multiplied, strip it off.
1530    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1531      Ops.erase(Ops.begin());
1532      --Idx;
1533    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1534      // If we have a multiply of zero, it will always be zero.
1535      return Ops[0];
1536    }
1537  }
1538
1539  // Skip over casts and add expressions until we get to a multiply.
1540  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1541    ++Idx;
1542
1543  if (Ops.size() == 1)
1544    return Ops[0];
1545
1546  // If there are mul operands, inline them all into this expression.
1547  if (Idx < Ops.size()) {
1548    bool DeletedMul = false;
1549    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1550      // If we have a mul, expand the mul operands onto the end of the operands
1551      // list.
1552      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
1553      Ops.erase(Ops.begin()+Idx);
1554      DeletedMul = true;
1555    }
1556
1557    // If we deleted at least one mul, we added operands to the end of the list,
1558    // and they are not necessarily sorted.  Recurse to resort and resimplify
1559    // any operands we just acquired.
1560    if (DeletedMul)
1561      return getMulExpr(Ops);
1562  }
1563
1564  // If there are any add recurrences in the operands list, see if any other
1565  // added values are loop invariant.  If so, we can fold them into the
1566  // recurrence.
1567  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1568    ++Idx;
1569
1570  // Scan over all recurrences, trying to fold loop invariants into them.
1571  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1572    // Scan all of the other operands to this mul and add them to the vector if
1573    // they are loop invariant w.r.t. the recurrence.
1574    SmallVector<const SCEV *, 8> LIOps;
1575    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1576    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1577      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1578        LIOps.push_back(Ops[i]);
1579        Ops.erase(Ops.begin()+i);
1580        --i; --e;
1581      }
1582
1583    // If we found some loop invariants, fold them into the recurrence.
1584    if (!LIOps.empty()) {
1585      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
1586      SmallVector<const SCEV *, 4> NewOps;
1587      NewOps.reserve(AddRec->getNumOperands());
1588      if (LIOps.size() == 1) {
1589        const SCEV *Scale = LIOps[0];
1590        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1591          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1592      } else {
1593        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
1594          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
1595          MulOps.push_back(AddRec->getOperand(i));
1596          NewOps.push_back(getMulExpr(MulOps));
1597        }
1598      }
1599
1600      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
1601
1602      // If all of the other operands were loop invariant, we are done.
1603      if (Ops.size() == 1) return NewRec;
1604
1605      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
1606      for (unsigned i = 0;; ++i)
1607        if (Ops[i] == AddRec) {
1608          Ops[i] = NewRec;
1609          break;
1610        }
1611      return getMulExpr(Ops);
1612    }
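    // For example, 3 * {2,+,4}<L> becomes {6,+,12}<L>: unlike the add case,
    // a multiplicative scale applies to every operand of the recurrence.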
1613
1614    // Okay, if there weren't any loop invariants to be folded, check to see if
1615    // there are multiple AddRec's with the same loop induction variable being
1616    // multiplied together.  If so, we can fold them.
1617    for (unsigned OtherIdx = Idx+1;
1618         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1619      if (OtherIdx != Idx) {
1620        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1621        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1622          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
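          // The new step follows from differencing successive values:
          //   F(i+1)*G(i+1) - F(i)*G(i) = (F(i)+B)*(G(i)+D) - F(i)*G(i)
          //                             = F(i)*D + G(i)*B + B*D,
          // which is why the step below refers back to F and G themselves.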
1623          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1624          const SCEV *NewStart = getMulExpr(F->getStart(),
1625                                            G->getStart());
1626          const SCEV *B = F->getStepRecurrence(*this);
1627          const SCEV *D = G->getStepRecurrence(*this);
1628          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
1629                                           getMulExpr(G, B),
1630                                           getMulExpr(B, D));
1631          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
1632                                                F->getLoop());
1633          if (Ops.size() == 2) return NewAddRec;
1634
1635          Ops.erase(Ops.begin()+Idx);
1636          Ops.erase(Ops.begin()+OtherIdx-1);
1637          Ops.push_back(NewAddRec);
1638          return getMulExpr(Ops);
1639        }
1640      }
1641
1642    // Otherwise couldn't fold anything into this recurrence.  Move on to the
1643    // next one.
1644  }
1645
1646  // Okay, it looks like we really DO need a mul expr.  Check to see if we
1647  // already have one, otherwise create a new one.
1648  FoldingSetNodeID ID;
1649  ID.AddInteger(scMulExpr);
1650  ID.AddInteger(Ops.size());
1651  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1652    ID.AddPointer(Ops[i]);
1653  void *IP = 0;
1654  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1655  SCEV *S = SCEVAllocator.Allocate<SCEVMulExpr>();
1656  new (S) SCEVMulExpr(ID, Ops);
1657  UniqueSCEVs.InsertNode(S, IP);
1658  return S;
1659}
1660
1661/// getUDivExpr - Get a canonical unsigned division expression, or something
1662/// simpler if possible.
1663const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1664                                         const SCEV *RHS) {
1665  assert(getEffectiveSCEVType(LHS->getType()) ==
1666         getEffectiveSCEVType(RHS->getType()) &&
1667         "SCEVUDivExpr operand types don't match!");
1668
1669  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1670    if (RHSC->getValue()->equalsInt(1))
1671      return LHS;                            // X udiv 1 --> X
1672    if (RHSC->isZero())
1673      return getIntegerSCEV(0, LHS->getType()); // value is undefined
1674
1675    // Determine if the division can be folded into the operands
1676    // of LHS.
1677    // TODO: Generalize this to non-constants by using known-bits information.
1678    const Type *Ty = LHS->getType();
1679    unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1680    unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1681    // For non-power-of-two values, effectively round the value up to the
1682    // nearest power of two.
1683    if (!RHSC->getValue()->getValue().isPowerOf2())
1684      ++MaxShiftAmt;
1685    const IntegerType *ExtTy =
1686      IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
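    // ExtTy has enough extra bits that re-evaluating the expression after
    // zero-extension cannot wrap, so the equality checks below certify that
    // the proposed folds are safe in the original type.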
1687    // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
1688    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1689      if (const SCEVConstant *Step =
1690            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1691        if (!Step->getValue()->getValue()
1692              .urem(RHSC->getValue()->getValue()) &&
1693            getZeroExtendExpr(AR, ExtTy) ==
1694            getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1695                          getZeroExtendExpr(Step, ExtTy),
1696                          AR->getLoop())) {
1697          SmallVector<const SCEV *, 4> Operands;
1698          for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1699            Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1700          return getAddRecExpr(Operands, AR->getLoop());
1701        }
1702    // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1703    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1704      SmallVector<const SCEV *, 4> Operands;
1705      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1706        Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1707      if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1708        // Find an operand that's safely divisible.
1709        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1710          const SCEV *Op = M->getOperand(i);
1711          const SCEV *Div = getUDivExpr(Op, RHSC);
1712          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1713            const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
1714            Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
1715                                                    MOperands.end());
1716            Operands[i] = Div;
1717            return getMulExpr(Operands);
1718          }
1719        }
1720    }
1721    // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1722    if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
1723      SmallVector<const SCEV *, 4> Operands;
1724      for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1725        Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1726      if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1727        Operands.clear();
1728        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1729          const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1730          if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1731            break;
1732          Operands.push_back(Op);
1733        }
1734        if (Operands.size() == A->getNumOperands())
1735          return getAddExpr(Operands);
1736      }
1737    }
1738
1739    // Fold if both operands are constant.
1740    if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1741      Constant *LHSCV = LHSC->getValue();
1742      Constant *RHSCV = RHSC->getValue();
1743      return getConstant(cast<ConstantInt>(Context->getConstantExprUDiv(LHSCV,
1744                                                                 RHSCV)));
1745    }
1746  }
1747
1748  FoldingSetNodeID ID;
1749  ID.AddInteger(scUDivExpr);
1750  ID.AddPointer(LHS);
1751  ID.AddPointer(RHS);
1752  void *IP = 0;
1753  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1754  SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
1755  new (S) SCEVUDivExpr(ID, LHS, RHS);
1756  UniqueSCEVs.InsertNode(S, IP);
1757  return S;
1758}
1759
1760
1761/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1762/// Simplify the expression as much as possible.
1763const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
1764                               const SCEV *Step, const Loop *L) {
1765  SmallVector<const SCEV *, 4> Operands;
1766  Operands.push_back(Start);
1767  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1768    if (StepChrec->getLoop() == L) {
1769      Operands.insert(Operands.end(), StepChrec->op_begin(),
1770                      StepChrec->op_end());
1771      return getAddRecExpr(Operands, L);
1772    }
1773
1774  Operands.push_back(Step);
1775  return getAddRecExpr(Operands, L);
1776}
1777
1778/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1779/// Simplify the expression as much as possible.
1780const SCEV *
1781ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
1782                               const Loop *L) {
1783  if (Operands.size() == 1) return Operands[0];
1784#ifndef NDEBUG
1785  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1786    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1787           getEffectiveSCEVType(Operands[0]->getType()) &&
1788           "SCEVAddRecExpr operand types don't match!");
1789#endif
1790
1791  if (Operands.back()->isZero()) {
1792    Operands.pop_back();
1793    return getAddRecExpr(Operands, L);             // {X,+,0}  -->  X
1794  }
1795
1796  // Canonicalize nested AddRecs by nesting them in order of loop depth.
1797  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
1798    const Loop* NestedLoop = NestedAR->getLoop();
1799    if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
1800      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
1801                                                  NestedAR->op_end());
1802      Operands[0] = NestedAR->getStart();
1803      // AddRecs require their operands be loop-invariant with respect to their
1804      // loops. Don't perform this transformation if it would break this
1805      // requirement.
1806      bool AllInvariant = true;
1807      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1808        if (!Operands[i]->isLoopInvariant(L)) {
1809          AllInvariant = false;
1810          break;
1811        }
1812      if (AllInvariant) {
1813        NestedOperands[0] = getAddRecExpr(Operands, L);
1814        AllInvariant = true;
1815        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
1816          if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
1817            AllInvariant = false;
1818            break;
1819          }
1820        if (AllInvariant)
1821          // Ok, both add recurrences are valid after the transformation.
1822          return getAddRecExpr(NestedOperands, NestedLoop);
1823      }
1824      // Reset Operands to its original state.
1825      Operands[0] = NestedAR;
1826    }
1827  }
1828
1829  FoldingSetNodeID ID;
1830  ID.AddInteger(scAddRecExpr);
1831  ID.AddInteger(Operands.size());
1832  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1833    ID.AddPointer(Operands[i]);
1834  ID.AddPointer(L);
1835  void *IP = 0;
1836  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1837  SCEV *S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
1838  new (S) SCEVAddRecExpr(ID, Operands, L);
1839  UniqueSCEVs.InsertNode(S, IP);
1840  return S;
1841}
1842
1843const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
1844                                         const SCEV *RHS) {
1845  SmallVector<const SCEV *, 2> Ops;
1846  Ops.push_back(LHS);
1847  Ops.push_back(RHS);
1848  return getSMaxExpr(Ops);
1849}
1850
1851const SCEV *
1852ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1853  assert(!Ops.empty() && "Cannot get empty smax!");
1854  if (Ops.size() == 1) return Ops[0];
1855#ifndef NDEBUG
1856  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1857    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1858           getEffectiveSCEVType(Ops[0]->getType()) &&
1859           "SCEVSMaxExpr operand types don't match!");
1860#endif
1861
1862  // Sort by complexity; this groups all similar expression types together.
1863  GroupByComplexity(Ops, LI);
1864
1865  // If there are any constants, fold them together.
1866  unsigned Idx = 0;
1867  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1868    ++Idx;
1869    assert(Idx < Ops.size());
1870    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1871      // We found two constants, fold them together!
1872      ConstantInt *Fold = Context->getConstantInt(
1873                              APIntOps::smax(LHSC->getValue()->getValue(),
1874                                             RHSC->getValue()->getValue()));
1875      Ops[0] = getConstant(Fold);
1876      Ops.erase(Ops.begin()+1);  // Erase the folded element
1877      if (Ops.size() == 1) return Ops[0];
1878      LHSC = cast<SCEVConstant>(Ops[0]);
1879    }
1880
1881    // If we are left with a constant minimum-int, strip it off.
1882    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
1883      Ops.erase(Ops.begin());
1884      --Idx;
1885    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
1886      // If we have an smax with a constant maximum-int, it will always be
1887      // maximum-int.
1888      return Ops[0];
1889    }
1890  }
1891
1892  if (Ops.size() == 1) return Ops[0];
1893
1894  // Find the first SMax.
1895  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
1896    ++Idx;
1897
1898  // Check to see if one of the operands is an SMax. If so, expand its operands
1899  // onto our operand list, and recurse to simplify.
1900  if (Idx < Ops.size()) {
1901    bool DeletedSMax = false;
1902    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
1903      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
1904      Ops.erase(Ops.begin()+Idx);
1905      DeletedSMax = true;
1906    }
1907
1908    if (DeletedSMax)
1909      return getSMaxExpr(Ops);
1910  }
1911
1912  // Okay, check to see if the same value occurs in the operand list twice.  If
1913  // so, delete one.  Since we sorted the list, these values are required to
1914  // be adjacent.
1915  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1916    if (Ops[i] == Ops[i+1]) {      //  X smax Y smax Y  -->  X smax Y
1917      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1918      --i; --e;
1919    }
1920
1921  if (Ops.size() == 1) return Ops[0];
1922
1923  assert(!Ops.empty() && "Reduced smax down to nothing!");
1924
1925  // Okay, it looks like we really DO need an smax expr.  Check to see if we
1926  // already have one, otherwise create a new one.
1927  FoldingSetNodeID ID;
1928  ID.AddInteger(scSMaxExpr);
1929  ID.AddInteger(Ops.size());
1930  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1931    ID.AddPointer(Ops[i]);
1932  void *IP = 0;
1933  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1934  SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
1935  new (S) SCEVSMaxExpr(ID, Ops);
1936  UniqueSCEVs.InsertNode(S, IP);
1937  return S;
1938}
1939
1940const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
1941                                         const SCEV *RHS) {
1942  SmallVector<const SCEV *, 2> Ops;
1943  Ops.push_back(LHS);
1944  Ops.push_back(RHS);
1945  return getUMaxExpr(Ops);
1946}
1947
1948const SCEV *
1949ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1950  assert(!Ops.empty() && "Cannot get empty umax!");
1951  if (Ops.size() == 1) return Ops[0];
1952#ifndef NDEBUG
1953  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1954    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1955           getEffectiveSCEVType(Ops[0]->getType()) &&
1956           "SCEVUMaxExpr operand types don't match!");
1957#endif
1958
1959  // Sort by complexity; this groups all similar expression types together.
1960  GroupByComplexity(Ops, LI);
1961
1962  // If there are any constants, fold them together.
1963  unsigned Idx = 0;
1964  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1965    ++Idx;
1966    assert(Idx < Ops.size());
1967    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1968      // We found two constants, fold them together!
1969      ConstantInt *Fold = Context->getConstantInt(
1970                              APIntOps::umax(LHSC->getValue()->getValue(),
1971                                             RHSC->getValue()->getValue()));
1972      Ops[0] = getConstant(Fold);
1973      Ops.erase(Ops.begin()+1);  // Erase the folded element
1974      if (Ops.size() == 1) return Ops[0];
1975      LHSC = cast<SCEVConstant>(Ops[0]);
1976    }
1977
1978    // If we are left with a constant minimum-int, strip it off.
1979    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
1980      Ops.erase(Ops.begin());
1981      --Idx;
1982    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
1983      // If we have an umax with a constant maximum-int, it will always be
1984      // maximum-int.
1985      return Ops[0];
1986    }
1987  }
1988
1989  if (Ops.size() == 1) return Ops[0];
1990
1991  // Find the first UMax.
1992  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
1993    ++Idx;
1994
1995  // Check to see if one of the operands is a UMax. If so, expand its operands
1996  // onto our operand list, and recurse to simplify.
1997  if (Idx < Ops.size()) {
1998    bool DeletedUMax = false;
1999    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
2000      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
2001      Ops.erase(Ops.begin()+Idx);
2002      DeletedUMax = true;
2003    }
2004
2005    if (DeletedUMax)
2006      return getUMaxExpr(Ops);
2007  }
2008
2009  // Okay, check to see if the same value occurs in the operand list twice.  If
2010  // so, delete one.  Since we sorted the list, these values are required to
2011  // be adjacent.
2012  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2013    if (Ops[i] == Ops[i+1]) {      //  X umax Y umax Y  -->  X umax Y
2014      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2015      --i; --e;
2016    }
2017
2018  if (Ops.size() == 1) return Ops[0];
2019
2020  assert(!Ops.empty() && "Reduced umax down to nothing!");
2021
2022  // Okay, it looks like we really DO need a umax expr.  Check to see if we
2023  // already have one, otherwise create a new one.
2024  FoldingSetNodeID ID;
2025  ID.AddInteger(scUMaxExpr);
2026  ID.AddInteger(Ops.size());
2027  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2028    ID.AddPointer(Ops[i]);
2029  void *IP = 0;
2030  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2031  SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
2032  new (S) SCEVUMaxExpr(ID, Ops);
2033  UniqueSCEVs.InsertNode(S, IP);
2034  return S;
2035}
2036
2037const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
2038                                         const SCEV *RHS) {
2039  // ~smax(~x, ~y) == smin(x, y).
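  // The identity holds because ~x == -1 - x reverses the signed order, so
  // complementing, taking the max, and complementing again yields the min.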
2040  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2041}
2042
2043const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2044                                         const SCEV *RHS) {
2045  // ~umax(~x, ~y) == umin(x, y).
2046  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2047}
2048
2049const SCEV *ScalarEvolution::getUnknown(Value *V) {
2050  // Don't attempt to do anything other than create a SCEVUnknown object
2051  // here.  createSCEV only calls getUnknown after checking for all other
2052  // interesting possibilities, and any other code that calls getUnknown
2053  // is doing so in order to hide a value from SCEV canonicalization.
2054
2055  FoldingSetNodeID ID;
2056  ID.AddInteger(scUnknown);
2057  ID.AddPointer(V);
2058  void *IP = 0;
2059  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2060  SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
2061  new (S) SCEVUnknown(ID, V);
2062  UniqueSCEVs.InsertNode(S, IP);
2063  return S;
2064}
2065
2066//===----------------------------------------------------------------------===//
2067//            Basic SCEV Analysis and PHI Idiom Recognition Code
2068//
2069
2070/// isSCEVable - Test if values of the given type are analyzable within
2071/// the SCEV framework. This primarily includes integer types, and it
2072/// can optionally include pointer types if the ScalarEvolution class
2073/// has access to target-specific information.
2074bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2075  // Integers are always SCEVable.
2076  if (Ty->isInteger())
2077    return true;
2078
2079  // Pointers are SCEVable if TargetData information is available
2080  // to provide pointer size information.
2081  if (isa<PointerType>(Ty))
2082    return TD != NULL;
2083
2084  // Otherwise it's not SCEVable.
2085  return false;
2086}
2087
2088/// getTypeSizeInBits - Return the size in bits of the specified type,
2089/// for which isSCEVable must return true.
2090uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2091  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2092
2093  // If we have a TargetData, use it!
2094  if (TD)
2095    return TD->getTypeSizeInBits(Ty);
2096
2097  // Otherwise, we support only integer types.
2098  assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
2099  return Ty->getPrimitiveSizeInBits();
2100}
2101
2102/// getEffectiveSCEVType - Return a type with the same bitwidth as
2103/// the given type and which represents how SCEV will treat the given
2104/// type, for which isSCEVable must return true. For pointer types,
2105/// this is the pointer-sized integer type.
2106const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2107  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2108
2109  if (Ty->isInteger())
2110    return Ty;
2111
2112  assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
2113  return TD->getIntPtrType();
2114}
2115
2116const SCEV *ScalarEvolution::getCouldNotCompute() {
2117  return &CouldNotCompute;
2118}
2119
2120/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2121/// expression and create a new one.
2122const SCEV *ScalarEvolution::getSCEV(Value *V) {
2123  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2124
2125  std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
2126  if (I != Scalars.end()) return I->second;
2127  const SCEV *S = createSCEV(V);
2128  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2129  return S;
2130}
2131
2132/// getIntegerSCEV - Given a SCEVable type, create a constant for the
2133/// specified signed integer value and return a SCEV for the constant.
2134const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
2135  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
2136  return getConstant(Context->getConstantInt(ITy, Val));
2137}
2138
2139/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2140///
2141const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2142  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2143    return getConstant(
2144               cast<ConstantInt>(Context->getConstantExprNeg(VC->getValue())));
2145
2146  const Type *Ty = V->getType();
2147  Ty = getEffectiveSCEVType(Ty);
2148  return getMulExpr(V,
2149                  getConstant(cast<ConstantInt>(Context->getAllOnesValue(Ty))));
2150}
2151
2152/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2153const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2154  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2155    return getConstant(
2156                cast<ConstantInt>(Context->getConstantExprNot(VC->getValue())));
2157
2158  const Type *Ty = V->getType();
2159  Ty = getEffectiveSCEVType(Ty);
2160  const SCEV *AllOnes =
2161                   getConstant(cast<ConstantInt>(Context->getAllOnesValue(Ty)));
2162  return getMinusSCEV(AllOnes, V);
2163}
2164
2165/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2166///
2167const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2168                                          const SCEV *RHS) {
2169  // X - Y --> X + -Y
2170  return getAddExpr(LHS, getNegativeSCEV(RHS));
2171}
2172
2173/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2174/// input value to the specified type.  If the type must be extended, it is zero
2175/// extended.
2176const SCEV *
2177ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2178                                         const Type *Ty) {
2179  const Type *SrcTy = V->getType();
2180  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2181         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2182         "Cannot truncate or zero extend with non-integer arguments!");
2183  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2184    return V;  // No conversion
2185  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2186    return getTruncateExpr(V, Ty);
2187  return getZeroExtendExpr(V, Ty);
2188}
2189
2190/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2191/// input value to the specified type.  If the type must be extended, it is sign
2192/// extended.
2193const SCEV *
2194ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2195                                         const Type *Ty) {
2196  const Type *SrcTy = V->getType();
2197  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2198         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2199         "Cannot truncate or zero extend with non-integer arguments!");
2200  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2201    return V;  // No conversion
2202  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2203    return getTruncateExpr(V, Ty);
2204  return getSignExtendExpr(V, Ty);
2205}
2206
2207/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2208/// input value to the specified type.  If the type must be extended, it is zero
2209/// extended.  The conversion must not be narrowing.
2210const SCEV *
2211ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2212  const Type *SrcTy = V->getType();
2213  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2214         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2215         "Cannot noop or zero extend with non-integer arguments!");
2216  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2217         "getNoopOrZeroExtend cannot truncate!");
2218  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2219    return V;  // No conversion
2220  return getZeroExtendExpr(V, Ty);
2221}
2222
2223/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2224/// input value to the specified type.  If the type must be extended, it is sign
2225/// extended.  The conversion must not be narrowing.
2226const SCEV *
2227ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2228  const Type *SrcTy = V->getType();
2229  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2230         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2231         "Cannot noop or sign extend with non-integer arguments!");
2232  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2233         "getNoopOrSignExtend cannot truncate!");
2234  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2235    return V;  // No conversion
2236  return getSignExtendExpr(V, Ty);
2237}
2238
2239/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2240/// the input value to the specified type. If the type must be extended,
2241/// it is extended with unspecified bits. The conversion must not be
2242/// narrowing.
2243const SCEV *
2244ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2245  const Type *SrcTy = V->getType();
2246  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2247         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2248         "Cannot noop or any extend with non-integer arguments!");
2249  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2250         "getNoopOrAnyExtend cannot truncate!");
2251  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2252    return V;  // No conversion
2253  return getAnyExtendExpr(V, Ty);
2254}
2255
2256/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2257/// input value to the specified type.  The conversion must not be widening.
2258const SCEV *
2259ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2260  const Type *SrcTy = V->getType();
2261  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2262         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2263         "Cannot truncate or noop with non-integer arguments!");
2264  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2265         "getTruncateOrNoop cannot extend!");
2266  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2267    return V;  // No conversion
2268  return getTruncateExpr(V, Ty);
2269}
2270
2271/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2272/// the types using zero-extension, and then perform a umax operation
2273/// with them.
2274const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2275                                                        const SCEV *RHS) {
2276  const SCEV *PromotedLHS = LHS;
2277  const SCEV *PromotedRHS = RHS;
2278
2279  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2280    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2281  else
2282    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2283
2284  return getUMaxExpr(PromotedLHS, PromotedRHS);
2285}
2286
2287/// getUMinFromMismatchedTypes - Promote the operands to the wider of
2288/// the types using zero-extension, and then perform a umin operation
2289/// with them.
2290const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2291                                                        const SCEV *RHS) {
2292  const SCEV *PromotedLHS = LHS;
2293  const SCEV *PromotedRHS = RHS;
2294
2295  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2296    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2297  else
2298    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2299
2300  return getUMinExpr(PromotedLHS, PromotedRHS);
2301}
2302
2303/// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value for
2304/// the specified instruction and replaces any references to the symbolic value
2305/// SymName with the specified value.  This is used during PHI resolution.
2306void
2307ScalarEvolution::ReplaceSymbolicValueWithConcrete(Instruction *I,
2308                                                  const SCEV *SymName,
2309                                                  const SCEV *NewVal) {
2310  std::map<SCEVCallbackVH, const SCEV *>::iterator SI =
2311    Scalars.find(SCEVCallbackVH(I, this));
2312  if (SI == Scalars.end()) return;
2313
2314  const SCEV *NV =
2315    SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this);
2316  if (NV == SI->second) return;  // No change.
2317
2318  SI->second = NV;       // Update the scalars map!
2319
2320  // Any instruction values that use this instruction might also need to be
2321  // updated!
2322  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
2323       UI != E; ++UI)
2324    ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal);
2325}
2326
2327/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2328/// a loop header, making it a potential recurrence, or it doesn't.
2329///
2330const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2331  if (PN->getNumIncomingValues() == 2)  // The loops have been canonicalized.
2332    if (const Loop *L = LI->getLoopFor(PN->getParent()))
2333      if (L->getHeader() == PN->getParent()) {
2334        // If it lives in the loop header, it has two incoming values, one
2335        // from outside the loop, and one from inside.
2336        unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2337        unsigned BackEdge     = IncomingEdge^1;
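        // L->contains() is true when incoming block 0 lies inside the loop,
        // i.e. it is the backedge predecessor, so the outside value arrives
        // at index 1; XOR with 1 then selects the complementary index.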
2338
2339        // While we are analyzing this PHI node, handle its value symbolically.
2340        const SCEV *SymbolicName = getUnknown(PN);
2341        assert(Scalars.find(PN) == Scalars.end() &&
2342               "PHI node already processed?");
2343        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2344
2345        // Using this symbolic name for the PHI, analyze the value coming around
2346        // the back-edge.
2347        const SCEV *BEValue = getSCEV(PN->getIncomingValue(BackEdge));
2348
2349        // NOTE: If BEValue is loop invariant, we know that the PHI node just
2350        // has a special value for the first iteration of the loop.
2351
2352        // If the value coming around the backedge is an add with the symbolic
2353        // value we just inserted, then we found a simple induction variable!
2354        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2355          // If there is a single occurrence of the symbolic value, replace it
2356          // with a recurrence.
2357          unsigned FoundIndex = Add->getNumOperands();
2358          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2359            if (Add->getOperand(i) == SymbolicName)
2360              if (FoundIndex == e) {
2361                FoundIndex = i;
2362                break;
2363              }
2364
2365          if (FoundIndex != Add->getNumOperands()) {
2366            // Create an add with everything but the specified operand.
2367            SmallVector<const SCEV *, 8> Ops;
2368            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2369              if (i != FoundIndex)
2370                Ops.push_back(Add->getOperand(i));
2371            const SCEV *Accum = getAddExpr(Ops);
2372
2373            // This is not a valid addrec if the step amount is varying each
2374            // loop iteration, but is not itself an addrec in this loop.
2375            if (Accum->isLoopInvariant(L) ||
2376                (isa<SCEVAddRecExpr>(Accum) &&
2377                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2378              const SCEV *StartVal =
2379                getSCEV(PN->getIncomingValue(IncomingEdge));
2380              const SCEV *PHISCEV =
2381                getAddRecExpr(StartVal, Accum, L);
2382
2383              // Okay, for the entire analysis of this edge we assumed the PHI
2384              // to be symbolic.  We now need to go back and update all of the
2385              // entries for the scalars that use the PHI (except for the PHI
2386              // itself) to use the new analyzed value instead of the "symbolic"
2387              // value.
2388              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2389              return PHISCEV;
2390            }
2391          }
2392        } else if (const SCEVAddRecExpr *AddRec =
2393                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
2394          // Otherwise, this could be a loop like this:
2395          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2396          // In this case, j = {1,+,1}  and BEValue is j.
2397          // Because the other in-value of i (0) fits the evolution of BEValue,
2398          // i really is an addrec evolution.
2399          if (AddRec->getLoop() == L && AddRec->isAffine()) {
2400            const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2401
2402            // If StartVal = j.start - j.stride, we can use StartVal as the
2403            // initial value of the addrec evolution.
2404            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2405                                            AddRec->getOperand(1))) {
2406              const SCEV *PHISCEV =
2407                 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2408
2409              // Okay, for the entire analysis of this edge we assumed the PHI
2410              // to be symbolic.  We now need to go back and update all of the
2411              // entries for the scalars that use the PHI (except for the PHI
2412              // itself) to use the new analyzed value instead of the "symbolic"
2413              // value.
2414              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2415              return PHISCEV;
2416            }
2417          }
2418        }
2419
2420        return SymbolicName;
2421      }
2422
2423  // It's tempting to recognize PHIs with a unique incoming value; however,
2424  // doing so leads passes like indvars to break LCSSA form. Fortunately, such
2425  // PHIs are rare, as instcombine zaps them.
2426
2427  // If it's not a loop phi, we can't handle it yet.
2428  return getUnknown(PN);
2429}
2430
2431/// createNodeForGEP - Expand GEP instructions into add and multiply
2432/// operations. This allows them to be analyzed by regular SCEV code.
2433///
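/// For example, on a target with 64-bit pointers, a GEP of [10 x i32]* %p
/// with indices (i64 0, i64 %i) contributes 0*40 + 4*%i bytes, so the
/// result is the SCEV (%p + 4*%i).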
2434const SCEV *ScalarEvolution::createNodeForGEP(Operator *GEP) {
2435
2436  const Type *IntPtrTy = TD->getIntPtrType();
2437  Value *Base = GEP->getOperand(0);
2438  // Don't attempt to analyze GEPs over unsized objects.
2439  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2440    return getUnknown(GEP);
2441  const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
2442  gep_type_iterator GTI = gep_type_begin(GEP);
2443  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2444                                      E = GEP->op_end();
2445       I != E; ++I) {
2446    Value *Index = *I;
2447    // Compute the (potentially symbolic) offset in bytes for this index.
2448    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2449      // For a struct, add the member offset.
2450      const StructLayout &SL = *TD->getStructLayout(STy);
2451      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2452      uint64_t Offset = SL.getElementOffset(FieldNo);
2453      TotalOffset = getAddExpr(TotalOffset, getIntegerSCEV(Offset, IntPtrTy));
2454    } else {
2455      // For an array, add the element offset, explicitly scaled.
2456      const SCEV *LocalOffset = getSCEV(Index);
2457      if (!isa<PointerType>(LocalOffset->getType()))
2458        // Getelementptr indices are signed.
2459        LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
2460      LocalOffset =
2461        getMulExpr(LocalOffset,
2462                   getIntegerSCEV(TD->getTypeAllocSize(*GTI), IntPtrTy));
2463      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2464    }
2465  }
2466  return getAddExpr(getSCEV(Base), TotalOffset);
2467}
2468
2469/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2470/// guaranteed to end in (at every loop iteration).  It is, at the same time,
2471/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2472/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
2473uint32_t
2474ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2475  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2476    return C->getValue()->getValue().countTrailingZeros();
2477
2478  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2479    return std::min(GetMinTrailingZeros(T->getOperand()),
2480                    (uint32_t)getTypeSizeInBits(T->getType()));
2481
2482  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2483    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2484    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2485             getTypeSizeInBits(E->getType()) : OpRes;
2486  }
2487
2488  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2489    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2490    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2491             getTypeSizeInBits(E->getType()) : OpRes;
2492  }
2493
2494  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2495    // The result is the min of all operands' results.
2496    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2497    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2498      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2499    return MinOpRes;
2500  }
2501
2502  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2503    // The result is the sum of all operands' results.
2504    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2505    uint32_t BitWidth = getTypeSizeInBits(M->getType());
2506    for (unsigned i = 1, e = M->getNumOperands();
2507         SumOpRes != BitWidth && i != e; ++i)
2508      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2509                          BitWidth);
2510    return SumOpRes;
2511  }
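  // For example, (4*x)*(2*y) has at least 2 + 1 = 3 trailing zero bits:
  // trailing zero counts add under multiplication, capped at the bit width.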
2512
2513  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2514    // The result is the min of all operands' results.
2515    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2516    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2517      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2518    return MinOpRes;
2519  }
2520
2521  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2522    // The result is the min of all operands' results.
2523    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2524    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2525      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2526    return MinOpRes;
2527  }
2528
2529  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2530    // The result is the min of all operands' results.
2531    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2532    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2533      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2534    return MinOpRes;
2535  }
2536
2537  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2538    // For a SCEVUnknown, ask ValueTracking.
2539    unsigned BitWidth = getTypeSizeInBits(U->getType());
2540    APInt Mask = APInt::getAllOnesValue(BitWidth);
2541    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2542    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
2543    return Zeros.countTrailingOnes();
2544  }
2545
2546  // SCEVUDivExpr
2547  return 0;
2548}
2549
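// A minimal usage sketch (illustrative only, not compiled): 'SE' and 'S' are
// hypothetical names for a ScalarEvolution instance and the SCEV {4,+,8}.
#if 0
  // The add-rec case above takes the min over the operands: the start 4 has
  // two trailing zero bits and the step 8 has three, so the result is 2.
  // Indeed, every value 4, 12, 20, 28, ... is divisible by 4 but not by 8.
  uint32_t TZ = SE.GetMinTrailingZeros(S);
  assert(TZ == 2);
#endif
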
2550/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
2551///
2552ConstantRange
2553ScalarEvolution::getUnsignedRange(const SCEV *S) {
2554
2555  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2556    return ConstantRange(C->getValue()->getValue());
2557
2558  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2559    ConstantRange X = getUnsignedRange(Add->getOperand(0));
2560    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2561      X = X.add(getUnsignedRange(Add->getOperand(i)));
2562    return X;
2563  }
2564
2565  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2566    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
2567    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2568      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
2569    return X;
2570  }
2571
2572  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2573    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
2574    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2575      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
2576    return X;
2577  }
2578
2579  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2580    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
2581    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2582      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
2583    return X;
2584  }
2585
2586  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2587    ConstantRange X = getUnsignedRange(UDiv->getLHS());
2588    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
2589    return X.udiv(Y);
2590  }
2591
2592  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2593    ConstantRange X = getUnsignedRange(ZExt->getOperand());
2594    return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
2595  }
2596
2597  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2598    ConstantRange X = getUnsignedRange(SExt->getOperand());
2599    return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
2600  }
2601
2602  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2603    ConstantRange X = getUnsignedRange(Trunc->getOperand());
2604    return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
2605  }
2606
2607  ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
2608
2609  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2610    const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
2611    const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
2612    if (!Trip) return FullSet;
2613
2614    // TODO: non-affine addrec
2615    if (AddRec->isAffine()) {
2616      const Type *Ty = AddRec->getType();
2617      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2618      if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
2619        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2620
2621        const SCEV *Start = AddRec->getStart();
2622        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2623
2624        // Check for overflow.
2625        if (!isKnownPredicate(ICmpInst::ICMP_ULE, Start, End))
2626          return FullSet;
2627
2628        ConstantRange StartRange = getUnsignedRange(Start);
2629        ConstantRange EndRange = getUnsignedRange(End);
2630        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
2631                                   EndRange.getUnsignedMin());
2632        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
2633                                   EndRange.getUnsignedMax());
2634        if (Min.isMinValue() && Max.isMaxValue())
2635          return ConstantRange(Min.getBitWidth(), /*isFullSet=*/true);
2636        return ConstantRange(Min, Max+1);
2637      }
2638    }
2639  }
2640
2641  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2642    // For a SCEVUnknown, ask ValueTracking.
2643    unsigned BitWidth = getTypeSizeInBits(U->getType());
2644    APInt Mask = APInt::getAllOnesValue(BitWidth);
2645    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2646    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2647    return ConstantRange(Ones, ~Zeros);
2648  }
2649
2650  return FullSet;
2651}
2652
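// A worked instance of the affine add-rec case below (illustrative only;
// 'SE' and 'IV' are hypothetical names): for IV = {0,+,2} in a loop whose
// backedge-taken count is the constant 9, Start is 0 and End is 18, the
// unsigned-overflow check (0 ULE 18) passes, and the result is [0, 19).
#if 0
  ConstantRange R = SE.getUnsignedRange(IV);
  // R.getUnsignedMin() == 0, R.getUnsignedMax() == 18.
#endif
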
2653/// getSignedRange - Determine the signed range for a particular SCEV.
2654///
2655ConstantRange
2656ScalarEvolution::getSignedRange(const SCEV *S) {
2657
2658  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2659    return ConstantRange(C->getValue()->getValue());
2660
2661  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2662    ConstantRange X = getSignedRange(Add->getOperand(0));
2663    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2664      X = X.add(getSignedRange(Add->getOperand(i)));
2665    return X;
2666  }
2667
2668  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2669    ConstantRange X = getSignedRange(Mul->getOperand(0));
2670    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2671      X = X.multiply(getSignedRange(Mul->getOperand(i)));
2672    return X;
2673  }
2674
2675  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2676    ConstantRange X = getSignedRange(SMax->getOperand(0));
2677    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2678      X = X.smax(getSignedRange(SMax->getOperand(i)));
2679    return X;
2680  }
2681
2682  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2683    ConstantRange X = getSignedRange(UMax->getOperand(0));
2684    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2685      X = X.umax(getSignedRange(UMax->getOperand(i)));
2686    return X;
2687  }
2688
2689  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2690    ConstantRange X = getSignedRange(UDiv->getLHS());
2691    ConstantRange Y = getSignedRange(UDiv->getRHS());
2692    return X.udiv(Y);
2693  }
2694
2695  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2696    ConstantRange X = getSignedRange(ZExt->getOperand());
2697    return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
2698  }
2699
2700  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2701    ConstantRange X = getSignedRange(SExt->getOperand());
2702    return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
2703  }
2704
2705  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2706    ConstantRange X = getSignedRange(Trunc->getOperand());
2707    return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
2708  }
2709
2710  ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
2711
2712  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2713    const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
2714    const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
2715    if (!Trip) return FullSet;
2716
2717    // TODO: non-affine addrec
2718    if (AddRec->isAffine()) {
2719      const Type *Ty = AddRec->getType();
2720      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2721      if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
2722        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2723
2724        const SCEV *Start = AddRec->getStart();
2725        const SCEV *Step = AddRec->getStepRecurrence(*this);
2726        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2727
2728        // Check for overflow.
2729        if (!(isKnownPositive(Step) &&
2730              isKnownPredicate(ICmpInst::ICMP_SLT, Start, End)) &&
2731            !(isKnownNegative(Step) &&
2732              isKnownPredicate(ICmpInst::ICMP_SGT, Start, End)))
2733          return FullSet;
2734
2735        ConstantRange StartRange = getSignedRange(Start);
2736        ConstantRange EndRange = getSignedRange(End);
2737        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
2738                                   EndRange.getSignedMin());
2739        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
2740                                   EndRange.getSignedMax());
2741        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
2742          return ConstantRange(Min.getBitWidth(), /*isFullSet=*/true);
2743        return ConstantRange(Min, Max+1);
2744      }
2745    }
2746  }
2747
2748  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2749    // For a SCEVUnknown, ask ValueTracking.
2750    unsigned BitWidth = getTypeSizeInBits(U->getType());
2751    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
2752    if (NS == 1)
2753      return FullSet;
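    // For example (illustrative): with BitWidth == 32 and NS == 24, the value
    // fits in 32 - 24 + 1 = 9 signed bits, so the expression below evaluates
    // to [-256, 256), i.e. [-2^8, 2^8 - 1] as a half-open interval.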
2754    return
2755      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
2756                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1);
2757  }
2758
2759  return FullSet;
2760}
2761
2762/// createSCEV - We know that there is no SCEV for the specified value.
2763/// Analyze the expression.
2764///
2765const SCEV *ScalarEvolution::createSCEV(Value *V) {
2766  if (!isSCEVable(V->getType()))
2767    return getUnknown(V);
2768
2769  unsigned Opcode = Instruction::UserOp1;
2770  if (Instruction *I = dyn_cast<Instruction>(V))
2771    Opcode = I->getOpcode();
2772  else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
2773    Opcode = CE->getOpcode();
2774  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
2775    return getConstant(CI);
2776  else if (isa<ConstantPointerNull>(V))
2777    return getIntegerSCEV(0, V->getType());
2778  else if (isa<UndefValue>(V))
2779    return getIntegerSCEV(0, V->getType());
2780  else
2781    return getUnknown(V);
2782
2783  Operator *U = cast<Operator>(V);
2784  switch (Opcode) {
2785  case Instruction::Add:
2786    return getAddExpr(getSCEV(U->getOperand(0)),
2787                      getSCEV(U->getOperand(1)));
2788  case Instruction::Mul:
2789    return getMulExpr(getSCEV(U->getOperand(0)),
2790                      getSCEV(U->getOperand(1)));
2791  case Instruction::UDiv:
2792    return getUDivExpr(getSCEV(U->getOperand(0)),
2793                       getSCEV(U->getOperand(1)));
2794  case Instruction::Sub:
2795    return getMinusSCEV(getSCEV(U->getOperand(0)),
2796                        getSCEV(U->getOperand(1)));
2797  case Instruction::And:
2798    // For an expression like x&255 that merely masks off the high bits,
2799    // use zext(trunc(x)) as the SCEV expression.
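    // For example (illustrative): "and i32 %x, 255" is modeled as
    // zext(trunc %x to i8) to i32 rather than as an opaque bitwise-and.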
2800    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2801      if (CI->isNullValue())
2802        return getSCEV(U->getOperand(1));
2803      if (CI->isAllOnesValue())
2804        return getSCEV(U->getOperand(0));
2805      const APInt &A = CI->getValue();
2806
2807      // Instcombine's ShrinkDemandedConstant may strip bits out of
2808      // constants, obscuring what would otherwise be a low-bits mask.
2809      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
2810      // knew about to reconstruct a low-bits mask value.
2811      unsigned LZ = A.countLeadingZeros();
2812      unsigned BitWidth = A.getBitWidth();
2813      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
2814      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2815      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
2816
2817      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
2818
2819      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
2820        return
2821          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
2822                                            IntegerType::get(BitWidth - LZ)),
2823                            U->getType());
2824    }
2825    break;
2826
2827  case Instruction::Or:
2828    // If the RHS of the Or is a constant, we may have something like:
2829    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
2830    // optimizations will transparently handle this case.
2831    //
2832    // In order for this transformation to be safe, the LHS must be of the
2833    // form X*(2^n) and the Or constant must be less than 2^n.
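    // For example (illustrative): if X*4 is known to have its low two bits
    // clear, X*4|1 is rewritten as X*4+1, which add-rec folding understands.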
2834    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2835      const SCEV *LHS = getSCEV(U->getOperand(0));
2836      const APInt &CIVal = CI->getValue();
2837      if (GetMinTrailingZeros(LHS) >=
2838          (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
2839        return getAddExpr(LHS, getSCEV(U->getOperand(1)));
2840    }
2841    break;
2842  case Instruction::Xor:
2843    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2844      // If the RHS of the xor is a signbit, then this is just an add.
2845      // Instcombine turns add of signbit into xor as a strength reduction step.
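      // For example (illustrative): "xor i32 %x, 0x80000000" equals
      // "add i32 %x, 0x80000000", because adding the sign bit can never
      // carry into any other bit of the result.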
2846      if (CI->getValue().isSignBit())
2847        return getAddExpr(getSCEV(U->getOperand(0)),
2848                          getSCEV(U->getOperand(1)));
2849
2850      // If the RHS of xor is -1, then this is a not operation.
2851      if (CI->isAllOnesValue())
2852        return getNotSCEV(getSCEV(U->getOperand(0)));
2853
2854      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
2855      // This is a variant of the check for xor with -1, and it handles
2856      // the case where instcombine has trimmed non-demanded bits out
2857      // of an xor with -1.
2858      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
2859        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
2860          if (BO->getOpcode() == Instruction::And &&
2861              LCI->getValue() == CI->getValue())
2862            if (const SCEVZeroExtendExpr *Z =
2863                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
2864              const Type *UTy = U->getType();
2865              const SCEV *Z0 = Z->getOperand();
2866              const Type *Z0Ty = Z0->getType();
2867              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
2868
2869              // If C is a low-bits mask, the zero extend is serving to
2870              // mask off the high bits. Complement the operand and
2871              // re-apply the zext.
2872              if (APIntOps::isMask(Z0TySize, CI->getValue()))
2873                return getZeroExtendExpr(getNotSCEV(Z0), UTy);
2874
2875              // If C is a single bit, it may be in the sign-bit position
2876              // before the zero-extend. In this case, represent the xor
2877              // using an add, which is equivalent, and re-apply the zext.
2878              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
2879              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
2880                  Trunc.isSignBit())
2881                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
2882                                         UTy);
2883            }
2884    }
2885    break;
2886
2887  case Instruction::Shl:
2888    // Turn shift left of a constant amount into a multiply.
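    // For example (illustrative): "shl i32 %x, 3" becomes %x * 8.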
2889    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2890      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2891      Constant *X = Context->getConstantInt(
2892        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2893      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2894    }
2895    break;
2896
2897  case Instruction::LShr:
2898    // Turn logical shift right of a constant into an unsigned divide.
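    // For example (illustrative): "lshr i32 %x, 3" becomes %x /u 8.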
2899    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2900      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2901      Constant *X = Context->getConstantInt(
2902        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2903      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2904    }
2905    break;
2906
2907  case Instruction::AShr:
2908    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
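    // For example (illustrative): "(%x shl 24) ashr 24" on i32 sign-extends
    // the low 8 bits, so it becomes sext(trunc %x to i8) to i32.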
2909    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2910      if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2911        if (L->getOpcode() == Instruction::Shl &&
2912            L->getOperand(1) == U->getOperand(1)) {
2913          unsigned BitWidth = getTypeSizeInBits(U->getType());
2914          uint64_t Amt = BitWidth - CI->getZExtValue();
2915          if (Amt == BitWidth)
2916            return getSCEV(L->getOperand(0));       // shift by zero --> noop
2917          if (Amt > BitWidth)
2918            return getIntegerSCEV(0, U->getType()); // value is undefined
2919          return
2920            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2921                                              IntegerType::get(Amt)),
2922                              U->getType());
2923        }
2924    break;
2925
2926  case Instruction::Trunc:
2927    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2928
2929  case Instruction::ZExt:
2930    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2931
2932  case Instruction::SExt:
2933    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2934
2935  case Instruction::BitCast:
2936    // BitCasts are no-op casts so we just eliminate the cast.
2937    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
2938      return getSCEV(U->getOperand(0));
2939    break;
2940
2941  case Instruction::IntToPtr:
2942    if (!TD) break; // Without TD we can't analyze pointers.
2943    return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2944                                   TD->getIntPtrType());
2945
2946  case Instruction::PtrToInt:
2947    if (!TD) break; // Without TD we can't analyze pointers.
2948    return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2949                                   U->getType());
2950
2951  case Instruction::GetElementPtr:
2952    if (!TD) break; // Without TD we can't analyze pointers.
2953    return createNodeForGEP(U);
2954
2955  case Instruction::PHI:
2956    return createNodeForPHI(cast<PHINode>(U));
2957
2958  case Instruction::Select:
2959    // This could be a smax or umax that was lowered earlier.
2960    // Try to recover it.
2961    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
2962      Value *LHS = ICI->getOperand(0);
2963      Value *RHS = ICI->getOperand(1);
2964      switch (ICI->getPredicate()) {
2965      case ICmpInst::ICMP_SLT:
2966      case ICmpInst::ICMP_SLE:
2967        std::swap(LHS, RHS);
2968        // fall through
2969      case ICmpInst::ICMP_SGT:
2970      case ICmpInst::ICMP_SGE:
2971        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2972          return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
2973        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2974          return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
2975        break;
2976      case ICmpInst::ICMP_ULT:
2977      case ICmpInst::ICMP_ULE:
2978        std::swap(LHS, RHS);
2979        // fall through
2980      case ICmpInst::ICMP_UGT:
2981      case ICmpInst::ICMP_UGE:
2982        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2983          return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
2984        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2985          return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
2986        break;
2987      case ICmpInst::ICMP_NE:
2988        // n != 0 ? n : 1  ->  umax(n, 1)
2989        if (LHS == U->getOperand(1) &&
2990            isa<ConstantInt>(U->getOperand(2)) &&
2991            cast<ConstantInt>(U->getOperand(2))->isOne() &&
2992            isa<ConstantInt>(RHS) &&
2993            cast<ConstantInt>(RHS)->isZero())
2994          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
2995        break;
2996      case ICmpInst::ICMP_EQ:
2997        // n == 0 ? 1 : n  ->  umax(n, 1)
2998        if (LHS == U->getOperand(2) &&
2999            isa<ConstantInt>(U->getOperand(1)) &&
3000            cast<ConstantInt>(U->getOperand(1))->isOne() &&
3001            isa<ConstantInt>(RHS) &&
3002            cast<ConstantInt>(RHS)->isZero())
3003          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
3004        break;
3005      default:
3006        break;
3007      }
3008    }
3009
3010  default: // We cannot analyze this expression.
3011    break;
3012  }
3013
3014  return getUnknown(V);
3015}
3016
3017
3018
3019//===----------------------------------------------------------------------===//
3020//                   Iteration Count Computation Code
3021//
3022
3023/// getBackedgeTakenCount - If the specified loop has a predictable
3024/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3025/// object. The backedge-taken count is the number of times the loop header
3026/// will be branched to from within the loop. This is one less than the
3027/// trip count of the loop, since it doesn't count the first iteration,
3028/// when the header is branched to from outside the loop.
3029///
3030/// Note that it is not valid to call this method on a loop without a
3031/// loop-invariant backedge-taken count (see
3032/// hasLoopInvariantBackedgeTakenCount).
3033///
3034const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3035  return getBackedgeTakenInfo(L).Exact;
3036}
3037
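// A minimal usage sketch (illustrative only, not compiled; 'SE' and 'L' are
// hypothetical names for this analysis and a loop):
#if 0
  if (SE.hasLoopInvariantBackedgeTakenCount(L)) {
    const SCEV *Exact = SE.getBackedgeTakenCount(L);    // exact backedge count
    const SCEV *Max = SE.getMaxBackedgeTakenCount(L);   // upper bound on it
    // The loop header executes Exact + 1 times in total.
  }
#endif
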
3038/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3039/// return the least SCEV value that is known never to be less than the
3040/// actual backedge taken count.
3041const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3042  return getBackedgeTakenInfo(L).Max;
3043}
3044
3045/// PushLoopPHIs - Push PHI nodes in the header of the given loop
3046/// onto the given Worklist.
3047static void
3048PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3049  BasicBlock *Header = L->getHeader();
3050
3051  // Push all Loop-header PHIs onto the Worklist stack.
3052  for (BasicBlock::iterator I = Header->begin();
3053       PHINode *PN = dyn_cast<PHINode>(I); ++I)
3054    Worklist.push_back(PN);
3055}
3056
3057/// PushDefUseChildren - Push users of the given Instruction
3058/// onto the given Worklist.
3059static void
3060PushDefUseChildren(Instruction *I,
3061                   SmallVectorImpl<Instruction *> &Worklist) {
3062  // Push the def-use children onto the Worklist stack.
3063  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
3064       UI != UE; ++UI)
3065    Worklist.push_back(cast<Instruction>(UI));
3066}
3067
3068const ScalarEvolution::BackedgeTakenInfo &
3069ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3070  // Initially insert a CouldNotCompute for this loop. If the insertion
3071  // succeeds, proceed to actually compute a backedge-taken count and
3072  // update the value. The temporary CouldNotCompute value tells SCEV
3073  // code elsewhere that it shouldn't attempt to request a new
3074  // backedge-taken count, which could result in infinite recursion.
3075  std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
3076    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3077  if (Pair.second) {
3078    BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
3079    if (ItCount.Exact != getCouldNotCompute()) {
3080      assert(ItCount.Exact->isLoopInvariant(L) &&
3081             ItCount.Max->isLoopInvariant(L) &&
3082             "Computed trip count isn't loop invariant for loop!");
3083      ++NumTripCountsComputed;
3084
3085      // Update the value in the map.
3086      Pair.first->second = ItCount;
3087    } else {
3088      if (ItCount.Max != getCouldNotCompute())
3089        // Update the value in the map.
3090        Pair.first->second = ItCount;
3091      if (isa<PHINode>(L->getHeader()->begin()))
3092        // Only count loops that have phi nodes as not being computable.
3093        ++NumTripCountsNotComputed;
3094    }
3095
3096    // Now that we know more about the trip count for this loop, forget any
3097    // existing SCEV values for PHI nodes in this loop since they are only
3098    // conservative estimates made without the benefit of trip count
3099    // information. This is similar to the code in
3100    // forgetLoopBackedgeTakenCount, except that it handles SCEVUnknown PHI
3101    // nodes specially.
3102    if (ItCount.hasAnyInfo()) {
3103      SmallVector<Instruction *, 16> Worklist;
3104      PushLoopPHIs(L, Worklist);
3105
3106      SmallPtrSet<Instruction *, 8> Visited;
3107      while (!Worklist.empty()) {
3108        Instruction *I = Worklist.pop_back_val();
3109        if (!Visited.insert(I)) continue;
3110
3111        std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3112          Scalars.find(static_cast<Value *>(I));
3113        if (It != Scalars.end()) {
3114          // SCEVUnknown for a PHI either means that it has an unrecognized
3115          // structure, or it's a PHI that's in the progress of being computed
3116          // by createNodeForPHI.  In the former case, additional loop trip
3117          // count information isn't going to change anything. In the later
3118          // case, createNodeForPHI will perform the necessary updates on its
3119          // own when it gets to that point.
3120          if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
3121            Scalars.erase(It);
3122          ValuesAtScopes.erase(I);
3123          if (PHINode *PN = dyn_cast<PHINode>(I))
3124            ConstantEvolutionLoopExitValue.erase(PN);
3125        }
3126
3127        PushDefUseChildren(I, Worklist);
3128      }
3129    }
3130  }
3131  return Pair.first->second;
3132}
3133
3134/// forgetLoopBackedgeTakenCount - This method should be called by the
3135/// client when it has changed a loop in a way that may affect
3136/// ScalarEvolution's ability to compute a trip count, or if the loop
3137/// is deleted.
3138void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
3139  BackedgeTakenCounts.erase(L);
3140
3141  SmallVector<Instruction *, 16> Worklist;
3142  PushLoopPHIs(L, Worklist);
3143
3144  SmallPtrSet<Instruction *, 8> Visited;
3145  while (!Worklist.empty()) {
3146    Instruction *I = Worklist.pop_back_val();
3147    if (!Visited.insert(I)) continue;
3148
3149    std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3150      Scalars.find(static_cast<Value *>(I));
3151    if (It != Scalars.end()) {
3152      Scalars.erase(It);
3153      ValuesAtScopes.erase(I);
3154      if (PHINode *PN = dyn_cast<PHINode>(I))
3155        ConstantEvolutionLoopExitValue.erase(PN);
3156    }
3157
3158    PushDefUseChildren(I, Worklist);
3159  }
3160}
3161
3162/// ComputeBackedgeTakenCount - Compute the number of times the backedge
3163/// of the specified loop will execute.
3164ScalarEvolution::BackedgeTakenInfo
3165ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3166  SmallVector<BasicBlock*, 8> ExitingBlocks;
3167  L->getExitingBlocks(ExitingBlocks);
3168
3169  // Examine all exits and pick the most conservative values.
3170  const SCEV *BECount = getCouldNotCompute();
3171  const SCEV *MaxBECount = getCouldNotCompute();
3172  bool CouldNotComputeBECount = false;
3173  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3174    BackedgeTakenInfo NewBTI =
3175      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3176
3177    if (NewBTI.Exact == getCouldNotCompute()) {
3178      // We couldn't compute an exact value for this exit, so
3179      // we won't be able to compute an exact value for the loop.
3180      CouldNotComputeBECount = true;
3181      BECount = getCouldNotCompute();
3182    } else if (!CouldNotComputeBECount) {
3183      if (BECount == getCouldNotCompute())
3184        BECount = NewBTI.Exact;
3185      else
3186        BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3187    }
3188    if (MaxBECount == getCouldNotCompute())
3189      MaxBECount = NewBTI.Max;
3190    else if (NewBTI.Max != getCouldNotCompute())
3191      MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3192  }
3193
3194  return BackedgeTakenInfo(BECount, MaxBECount);
3195}
3196
3197/// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3198/// of the specified loop will execute if it exits via the specified block.
3199ScalarEvolution::BackedgeTakenInfo
3200ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3201                                                   BasicBlock *ExitingBlock) {
3202
3203  // Okay, we've chosen an exiting block.  See what condition causes us to
3204  // exit at this block.
3205  //
3206  // FIXME: we should be able to handle switch instructions (with a single exit)
3207  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3208  if (ExitBr == 0) return getCouldNotCompute();
3209  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3210
3211  // At this point, we know we have a conditional branch that determines whether
3212  // the loop is exited.  However, we don't know if the branch is executed each
3213  // time through the loop.  If not, then the execution count of the branch will
3214  // not be equal to the trip count of the loop.
3215  //
3216  // Currently we check for this by checking to see if the Exit branch goes to
3217  // the loop header.  If so, we know it will always execute the same number of
3218  // times as the loop.  We also handle the case where the exit block *is* the
3219  // loop header.  This is common for un-rotated loops.
3220  //
3221  // If both of those tests fail, walk up the unique predecessor chain to the
3222  // header, stopping if there is an edge that doesn't exit the loop. If the
3223  // header is reached, the execution count of the branch will be equal to the
3224  // trip count of the loop.
3225  //
3226  //  More extensive analysis could be done to handle more cases here.
3227  //
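  // For example (illustrative): in "for (;;) { if (a) { if (b) break; } }"
  // the exit branch on 'b' only runs on iterations where 'a' is true, so its
  // execution count need not match the loop's trip count.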
3228  if (ExitBr->getSuccessor(0) != L->getHeader() &&
3229      ExitBr->getSuccessor(1) != L->getHeader() &&
3230      ExitBr->getParent() != L->getHeader()) {
3231    // The simple checks failed, try climbing the unique predecessor chain
3232    // up to the header.
3233    bool Ok = false;
3234    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3235      BasicBlock *Pred = BB->getUniquePredecessor();
3236      if (!Pred)
3237        return getCouldNotCompute();
3238      TerminatorInst *PredTerm = Pred->getTerminator();
3239      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3240        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3241        if (PredSucc == BB)
3242          continue;
3243        // If the predecessor has a successor that isn't BB and isn't
3244        // outside the loop, assume the worst.
3245        if (L->contains(PredSucc))
3246          return getCouldNotCompute();
3247      }
3248      if (Pred == L->getHeader()) {
3249        Ok = true;
3250        break;
3251      }
3252      BB = Pred;
3253    }
3254    if (!Ok)
3255      return getCouldNotCompute();
3256  }
3257
3258  // Proceed to the next level to examine the exit condition expression.
3259  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3260                                               ExitBr->getSuccessor(0),
3261                                               ExitBr->getSuccessor(1));
3262}
3263
3264/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3265/// backedge of the specified loop will execute if its exit condition
3266/// were a conditional branch of ExitCond, TBB, and FBB.
3267ScalarEvolution::BackedgeTakenInfo
3268ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3269                                                       Value *ExitCond,
3270                                                       BasicBlock *TBB,
3271                                                       BasicBlock *FBB) {
3272  // Check if the controlling expression for this loop is an And or Or.
3273  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3274    if (BO->getOpcode() == Instruction::And) {
3275      // Recurse on the operands of the and.
3276      BackedgeTakenInfo BTI0 =
3277        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3278      BackedgeTakenInfo BTI1 =
3279        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3280      const SCEV *BECount = getCouldNotCompute();
3281      const SCEV *MaxBECount = getCouldNotCompute();
3282      if (L->contains(TBB)) {
3283        // Both conditions must be true for the loop to continue executing.
3284        // Choose the less conservative count.
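        // For example (illustrative): "while (i != n && i != m)" keeps
        // looping only while both tests pass, so the combined count is the
        // umin of the two per-condition counts.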
3285        if (BTI0.Exact == getCouldNotCompute() ||
3286            BTI1.Exact == getCouldNotCompute())
3287          BECount = getCouldNotCompute();
3288        else
3289          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3290        if (BTI0.Max == getCouldNotCompute())
3291          MaxBECount = BTI1.Max;
3292        else if (BTI1.Max == getCouldNotCompute())
3293          MaxBECount = BTI0.Max;
3294        else
3295          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3296      } else {
3297        // Both conditions must be true for the loop to exit.
3298        assert(L->contains(FBB) && "Loop block has no successor in loop!");
3299        if (BTI0.Exact != getCouldNotCompute() &&
3300            BTI1.Exact != getCouldNotCompute())
3301          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3302        if (BTI0.Max != getCouldNotCompute() &&
3303            BTI1.Max != getCouldNotCompute())
3304          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3305      }
3306
3307      return BackedgeTakenInfo(BECount, MaxBECount);
3308    }
3309    if (BO->getOpcode() == Instruction::Or) {
3310      // Recurse on the operands of the or.
3311      BackedgeTakenInfo BTI0 =
3312        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3313      BackedgeTakenInfo BTI1 =
3314        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3315      const SCEV *BECount = getCouldNotCompute();
3316      const SCEV *MaxBECount = getCouldNotCompute();
3317      if (L->contains(FBB)) {
3318        // Both conditions must be false for the loop to continue executing.
3319        // Choose the less conservative count.
3320        if (BTI0.Exact == getCouldNotCompute() ||
3321            BTI1.Exact == getCouldNotCompute())
3322          BECount = getCouldNotCompute();
3323        else
3324          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3325        if (BTI0.Max == getCouldNotCompute())
3326          MaxBECount = BTI1.Max;
3327        else if (BTI1.Max == getCouldNotCompute())
3328          MaxBECount = BTI0.Max;
3329        else
3330          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3331      } else {
3332        // Both conditions must be false for the loop to exit.
3333        assert(L->contains(TBB) && "Loop block has no successor in loop!");
3334        if (BTI0.Exact != getCouldNotCompute() &&
3335            BTI1.Exact != getCouldNotCompute())
3336          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3337        if (BTI0.Max != getCouldNotCompute() &&
3338            BTI1.Max != getCouldNotCompute())
3339          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3340      }
3341
3342      return BackedgeTakenInfo(BECount, MaxBECount);
3343    }
3344  }
3345
3346  // With an icmp, it may be feasible to compute an exact backedge-taken count.
3347  // Proceed to the next level to examine the icmp.
3348  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3349    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3350
3351  // If it's not an integer or pointer comparison then compute it the hard way.
3352  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3353}
3354
3355/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3356/// backedge of the specified loop will execute if its exit condition
3357/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
3358ScalarEvolution::BackedgeTakenInfo
3359ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3360                                                           ICmpInst *ExitCond,
3361                                                           BasicBlock *TBB,
3362                                                           BasicBlock *FBB) {
3363
3364  // If the condition was exit on true, convert the condition to exit on false
3365  ICmpInst::Predicate Cond;
3366  if (!L->contains(FBB))
3367    Cond = ExitCond->getPredicate();
3368  else
3369    Cond = ExitCond->getInversePredicate();
3370
3371  // Handle common loops like: for (X = "string"; *X; ++X)
3372  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3373    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3374      const SCEV *ItCnt =
3375        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3376      if (!isa<SCEVCouldNotCompute>(ItCnt)) {
3377        unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
3378        return BackedgeTakenInfo(ItCnt,
3379                                 isa<SCEVConstant>(ItCnt) ? ItCnt :
3380                                   getConstant(APInt::getMaxValue(BitWidth)-1));
3381      }
3382    }
3383
3384  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3385  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3386
3387  // Try to evaluate any dependencies out of the loop.
3388  LHS = getSCEVAtScope(LHS, L);
3389  RHS = getSCEVAtScope(RHS, L);
3390
3391  // At this point, we would like to compute how many iterations of the
3392  // loop the predicate will return true for these inputs.
3393  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3394    // If there is a loop-invariant, force it into the RHS.
3395    std::swap(LHS, RHS);
3396    Cond = ICmpInst::getSwappedPredicate(Cond);
3397  }
3398
3399  // If we have a comparison of a chrec against a constant, try to use value
3400  // ranges to answer this query.
3401  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3402    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3403      if (AddRec->getLoop() == L) {
3404        // Form the constant range.
3405        ConstantRange CompRange(
3406            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3407
3408        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3409        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3410      }
3411
3412  switch (Cond) {
3413  case ICmpInst::ICMP_NE: {                     // while (X != Y)
3414    // Convert to: while (X-Y != 0)
3415    const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3416    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3417    break;
3418  }
3419  case ICmpInst::ICMP_EQ: {
3420    // Convert to: while (X-Y == 0)           // while (X == Y)
3421    const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3422    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3423    break;
3424  }
3425  case ICmpInst::ICMP_SLT: {
3426    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3427    if (BTI.hasAnyInfo()) return BTI;
3428    break;
3429  }
3430  case ICmpInst::ICMP_SGT: {
3431    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3432                                             getNotSCEV(RHS), L, true);
3433    if (BTI.hasAnyInfo()) return BTI;
3434    break;
3435  }
3436  case ICmpInst::ICMP_ULT: {
3437    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3438    if (BTI.hasAnyInfo()) return BTI;
3439    break;
3440  }
3441  case ICmpInst::ICMP_UGT: {
3442    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3443                                             getNotSCEV(RHS), L, false);
3444    if (BTI.hasAnyInfo()) return BTI;
3445    break;
3446  }
3447  default:
3448#if 0
3449    errs() << "ComputeBackedgeTakenCount ";
3450    if (ExitCond->getOperand(0)->getType()->isUnsigned())
3451      errs() << "[unsigned] ";
3452    errs() << *LHS << "   "
3453         << Instruction::getOpcodeName(Instruction::ICmp)
3454         << "   " << *RHS << "\n";
3455#endif
3456    break;
3457  }
3458  return
3459    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3460}
3461
3462static ConstantInt *
3463EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3464                                ScalarEvolution &SE) {
3465  const SCEV *InVal = SE.getConstant(C);
3466  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
3467  assert(isa<SCEVConstant>(Val) &&
3468         "Evaluation of SCEV at constant didn't fold correctly?");
3469  return cast<SCEVConstant>(Val)->getValue();
3470}
3471
3472/// GetAddressedElementFromGlobal - Given a global variable with an initializer
3473/// and a GEP expression (missing the pointer index) indexing into it, return
3474/// the addressed element of the initializer or null if the index expression is
3475/// invalid.
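/// For example (illustrative): for a global with a "[10 x i32]" initializer
/// and the single index 3, this returns the initializer's fourth element.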
3476static Constant *
3477GetAddressedElementFromGlobal(LLVMContext *Context, GlobalVariable *GV,
3478                              const std::vector<ConstantInt*> &Indices) {
3479  Constant *Init = GV->getInitializer();
3480  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3481    uint64_t Idx = Indices[i]->getZExtValue();
3482    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3483      assert(Idx < CS->getNumOperands() && "Bad struct index!");
3484      Init = cast<Constant>(CS->getOperand(Idx));
3485    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3486      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
3487      Init = cast<Constant>(CA->getOperand(Idx));
3488    } else if (isa<ConstantAggregateZero>(Init)) {
3489      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3490        assert(Idx < STy->getNumElements() && "Bad struct index!");
3491        Init = Context->getNullValue(STy->getElementType(Idx));
3492      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3493        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
3494        Init = Context->getNullValue(ATy->getElementType());
3495      } else {
3496        llvm_unreachable("Unknown constant aggregate type!");
3497      }
3498      // Init now holds the zero element; continue into any further indices.
3499    } else {
3500      return 0; // Unknown initializer type
3501    }
3502  }
3503  return Init;
3504}
3505
3506/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3507/// 'icmp op load X, cst', try to see if we can compute the backedge
3508/// execution count.
3509const SCEV *
3510ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3511                                                LoadInst *LI,
3512                                                Constant *RHS,
3513                                                const Loop *L,
3514                                                ICmpInst::Predicate predicate) {
3515  if (LI->isVolatile()) return getCouldNotCompute();
3516
3517  // Check to see if the loaded pointer is a getelementptr of a global.
3518  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3519  if (!GEP) return getCouldNotCompute();
3520
3521  // Make sure that it is really a constant global we are gepping, with an
3522  // initializer, and make sure the first IDX is really 0.
3523  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3524  if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
3525      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3526      !cast<Constant>(GEP->getOperand(1))->isNullValue())
3527    return getCouldNotCompute();
3528
3529  // Okay, we allow one non-constant index into the GEP instruction.
3530  Value *VarIdx = 0;
3531  std::vector<ConstantInt*> Indexes;
3532  unsigned VarIdxNum = 0;
3533  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3534    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3535      Indexes.push_back(CI);
3536    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
3537      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
3538      VarIdx = GEP->getOperand(i);
3539      VarIdxNum = i-2;
3540      Indexes.push_back(0);
3541    }
3542
3543  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3544  // Check to see if X is a loop-variant value now.
3545  const SCEV *Idx = getSCEV(VarIdx);
3546  Idx = getSCEVAtScope(Idx, L);
3547
3548  // We can only recognize very limited forms of loop index expressions, in
3549  // particular, only affine AddRec's like {C1,+,C2}.
3550  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3551  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3552      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3553      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3554    return getCouldNotCompute();
3555
3556  unsigned MaxSteps = MaxBruteForceIterations;
3557  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3558    ConstantInt *ItCst = Context->getConstantInt(
3559                           cast<IntegerType>(IdxExpr->getType()), IterationNum);
3560    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3561
3562    // Form the GEP offset.
3563    Indexes[VarIdxNum] = Val;
3564
3565    Constant *Result = GetAddressedElementFromGlobal(Context, GV, Indexes);
3566    if (Result == 0) break;  // Cannot compute!
3567
3568    // Evaluate the condition for this iteration.
3569    Result = ConstantExpr::getICmp(predicate, Result, RHS);
3570    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
3571    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3572#if 0
3573      errs() << "\n***\n*** Computed loop count " << *ItCst
3574             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3575             << "***\n";
3576#endif
3577      ++NumArrayLenItCounts;
3578      return getConstant(ItCst);   // Found terminating iteration!
3579    }
3580  }
3581  return getCouldNotCompute();
3582}
3583
3584
3585/// CanConstantFold - Return true if we can constant fold an instruction of the
3586/// specified type, assuming that all operands were constants.
3587static bool CanConstantFold(const Instruction *I) {
3588  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3589      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3590    return true;
3591
3592  if (const CallInst *CI = dyn_cast<CallInst>(I))
3593    if (const Function *F = CI->getCalledFunction())
3594      return canConstantFoldCallTo(F);
3595  return false;
3596}
3597
3598/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3599/// in the loop that V is derived from.  We allow arbitrary operations along the
3600/// way, but the operands of an operation must either be constants or a value
3601/// derived from a constant PHI.  If this expression does not fit with these
3602/// constraints, return null.
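/// For example (illustrative): given the header PHI
/// "%i = phi i32 [ 0, %ph ], [ %i.next, %latch ]" and
/// "%i.next = add i32 %i, 1", passing %i.next returns the PHI %i.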
3603static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3604  // If this is not an instruction, or if this is an instruction outside of the
3605  // loop, it can't be derived from a loop PHI.
3606  Instruction *I = dyn_cast<Instruction>(V);
3607  if (I == 0 || !L->contains(I->getParent())) return 0;
3608
3609  if (PHINode *PN = dyn_cast<PHINode>(I)) {
3610    if (L->getHeader() == I->getParent())
3611      return PN;
3612    else
3613      // We don't currently keep track of the control flow needed to evaluate
3614      // PHIs, so we cannot handle PHIs inside of loops.
3615      return 0;
3616  }
3617
3618  // If we won't be able to constant fold this expression even if the operands
3619  // are constants, return early.
3620  if (!CanConstantFold(I)) return 0;
3621
3622  // Otherwise, we can evaluate this instruction if all of its operands are
3623  // constant or derived from a PHI node themselves.
3624  PHINode *PHI = 0;
3625  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3626    if (!(isa<Constant>(I->getOperand(Op)) ||
3627          isa<GlobalValue>(I->getOperand(Op)))) {
3628      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3629      if (P == 0) return 0;  // Not evolving from PHI
3630      if (PHI == 0)
3631        PHI = P;
3632      else if (PHI != P)
3633        return 0;  // Evolving from multiple different PHIs.
3634    }
3635
3636  // This is an expression evolving from a constant PHI!
3637  return PHI;
3638}
3639
3640/// EvaluateExpression - Given an expression that passes the
3641/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3642/// in the loop has the value PHIVal.  If we can't fold this expression for some
3643/// reason, return null.
3644static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
3645  if (isa<PHINode>(V)) return PHIVal;
3646  if (Constant *C = dyn_cast<Constant>(V)) return C;
3647  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3648  Instruction *I = cast<Instruction>(V);
3649  LLVMContext *Context = I->getParent()->getContext();
3650
3651  std::vector<Constant*> Operands;
3652  Operands.resize(I->getNumOperands());
3653
3654  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3655    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
3656    if (Operands[i] == 0) return 0;
3657  }
3658
3659  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3660    return ConstantFoldCompareInstOperands(CI->getPredicate(),
3661                                           &Operands[0], Operands.size(),
3662                                           Context);
3663  else
3664    return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3665                                    &Operands[0], Operands.size(),
3666                                    Context);
3667}
3668
3669/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
3670/// in the header of its containing loop, we know the loop executes a
3671/// constant number of times, and the PHI node is just a recurrence
3672/// involving constants, fold it.
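/// For example (illustrative): a header PHI that starts at 0 and adds 3 on
/// each backedge, queried with BEs == 4, folds to the constant 12.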
3673Constant *
3674ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
3675                                                   const APInt& BEs,
3676                                                   const Loop *L) {
3677  std::map<PHINode*, Constant*>::iterator I =
3678    ConstantEvolutionLoopExitValue.find(PN);
3679  if (I != ConstantEvolutionLoopExitValue.end())
3680    return I->second;
3681
3682  if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
3683    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
3684
3685  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
3686
3687  // Since the loop is canonicalized, the PHI node must have two entries.  One
3688  // entry must be a constant (coming in from outside of the loop), and the
3689  // second must be derived from the same PHI.
3690  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3691  Constant *StartCST =
3692    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3693  if (StartCST == 0)
3694    return RetVal = 0;  // Must be a constant.
3695
3696  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3697  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3698  if (PN2 != PN)
3699    return RetVal = 0;  // Not derived from same PHI.
3700
3701  // Execute the loop symbolically to determine the exit value.
3702  if (BEs.getActiveBits() >= 32)
3703    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
3704
3705  unsigned NumIterations = BEs.getZExtValue(); // must be in range
3706  unsigned IterationNum = 0;
3707  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
3708    if (IterationNum == NumIterations)
3709      return RetVal = PHIVal;  // Got exit value!
3710
3711    // Compute the value of the PHI node for the next iteration.
3712    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3713    if (NextPHI == PHIVal)
3714      return RetVal = NextPHI;  // Stopped evolving!
3715    if (NextPHI == 0)
3716      return 0;        // Couldn't evaluate!
3717    PHIVal = NextPHI;
3718  }
3719}
3720
3721/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
3722/// constant number of times (the condition evolves only from constants),
3723/// try to evaluate a few iterations of the loop until the exit condition
3724/// gets a value of ExitWhen (true or false).  If we cannot evaluate the
3725/// trip count of the loop, return getCouldNotCompute().
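/// For example (illustrative): for "for (x = 3; x != 13; x += 2)" the test is
/// evaluated over 3, 5, 7, 9, 11 and first fails when x reaches 13, after 5
/// backedges, so the constant 5 is returned.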
3726const SCEV *
3727ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
3728                                                       Value *Cond,
3729                                                       bool ExitWhen) {
3730  PHINode *PN = getConstantEvolvingPHI(Cond, L);
3731  if (PN == 0) return getCouldNotCompute();
3732
3733  // Since the loop is canonicalized, the PHI node must have two entries.  One
3734  // entry must be a constant (coming in from outside of the loop), and the
3735  // second must be derived from the same PHI.
3736  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3737  Constant *StartCST =
3738    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3739  if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
3740
3741  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3742  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3743  if (PN2 != PN) return getCouldNotCompute();  // Not derived from same PHI.
3744
3745  // Okay, we found a PHI node that defines the trip count of this loop.  Execute
3746  // the loop symbolically to determine when the condition gets a value of
3747  // "ExitWhen".
3748  unsigned IterationNum = 0;
3749  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
3750  for (Constant *PHIVal = StartCST;
3751       IterationNum != MaxIterations; ++IterationNum) {
3752    ConstantInt *CondVal =
3753      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
3754
3755    // Couldn't symbolically evaluate.
3756    if (!CondVal) return getCouldNotCompute();
3757
3758    if (CondVal->getValue() == uint64_t(ExitWhen)) {
3759      ++NumBruteForceTripCountsComputed;
3760      return getConstant(Type::Int32Ty, IterationNum);
3761    }
3762
3763    // Compute the value of the PHI node for the next iteration.
3764    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3765    if (NextPHI == 0 || NextPHI == PHIVal)
3766      return getCouldNotCompute();// Couldn't evaluate or not making progress...
3767    PHIVal = NextPHI;
3768  }
3769
3770  // Too many iterations were needed to evaluate.
3771  return getCouldNotCompute();
3772}
3773
3774/// getSCEVAtScope - Return a SCEV expression handle for the specified value
3775/// at the specified scope in the program.  The L value specifies the loop
3776/// nest in which to evaluate the expression: null means the top level, and
3777/// a non-null loop means a point immediately inside that loop.
3778///
3779/// This method can be used to compute the exit value for a variable defined
3780/// in a loop by querying what the value will hold in the parent loop.
3781///
3782/// In the case that a relevant loop exit value cannot be computed, the
3783/// original value V is returned.
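/// For example (illustrative): for the induction variable of
/// "for (i = 0; i != n; ++i)", querying at the scope of the parent loop
/// yields the exit value n.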
3784const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
3785  // FIXME: this should be turned into a virtual method on SCEV!
3786
3787  if (isa<SCEVConstant>(V)) return V;
3788
3789  // If this instruction is evolved from a constant-evolving PHI, compute the
3790  // exit value from the loop without using SCEVs.
3791  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
3792    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
3793      const Loop *LI = (*this->LI)[I->getParent()];
3794      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
3795        if (PHINode *PN = dyn_cast<PHINode>(I))
3796          if (PN->getParent() == LI->getHeader()) {
3797            // Okay, there is no closed form solution for the PHI node.  Check
3798            // to see if the loop that contains it has a known backedge-taken
3799            // count.  If so, we may be able to force computation of the exit
3800            // value.
3801            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
3802            if (const SCEVConstant *BTCC =
3803                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
3804              // Okay, we know how many times the containing loop executes.  If
3805              // this is a constant evolving PHI node, get the final value at
3806              // the specified iteration number.
3807              Constant *RV = getConstantEvolutionLoopExitValue(PN,
3808                                                   BTCC->getValue()->getValue(),
3809                                                               LI);
3810              if (RV) return getSCEV(RV);
3811            }
3812          }
3813
3814      // Okay, this is an expression that we cannot symbolically evaluate
3815      // into a SCEV.  Check to see if it's possible to symbolically evaluate
3816      // the arguments into constants, and if so, try to constant propagate the
3817      // result.  This is particularly useful for computing loop exit values.
3818      if (CanConstantFold(I)) {
3819        // Check to see if we've folded this instruction at this loop before.
3820        std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
3821        std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
3822          Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
3823        if (!Pair.second)
3824          return Pair.first->second ? getSCEV(Pair.first->second) : V;
3825
3826        std::vector<Constant*> Operands;
3827        Operands.reserve(I->getNumOperands());
3828        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3829          Value *Op = I->getOperand(i);
3830          if (Constant *C = dyn_cast<Constant>(Op)) {
3831            Operands.push_back(C);
3832          } else {
3833            // If any operand is non-constant and its type is not SCEVable
3834            // (neither integer nor pointer), don't even try to analyze it
3835            // with SCEV techniques.
3836            if (!isSCEVable(Op->getType()))
3837              return V;
3838
3839            const SCEV* OpV = getSCEVAtScope(Op, L);
3840            if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
3841              Constant *C = SC->getValue();
3842              if (C->getType() != Op->getType())
3843                C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3844                                                                  Op->getType(),
3845                                                                  false),
3846                                          C, Op->getType());
3847              Operands.push_back(C);
3848            } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
3849              if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
3850                if (C->getType() != Op->getType())
3851                  C =
3852                    ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3853                                                                  Op->getType(),
3854                                                                  false),
3855                                          C, Op->getType());
3856                Operands.push_back(C);
3857              } else
3858                return V;
3859            } else {
3860              return V;
3861            }
3862          }
3863        }
3864
3865        Constant *C;
3866        if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3867          C = ConstantFoldCompareInstOperands(CI->getPredicate(),
3868                                              &Operands[0], Operands.size(),
3869                                              Context);
3870        else
3871          C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3872                                       &Operands[0], Operands.size(), Context);
3873        Pair.first->second = C;
3874        return getSCEV(C);
3875      }
3876    }
3877
3878    // This is some other type of SCEVUnknown, just return it.
3879    return V;
3880  }
3881
3882  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
3883    // Avoid performing the look-up in the common case where the specified
3884    // expression has no loop-variant portions.
3885    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
3886      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3887      if (OpAtScope != Comm->getOperand(i)) {
3888        // Okay, at least one of these operands is loop variant but might be
3889        // foldable.  Build a new instance of the folded commutative expression.
3890        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
3891                                            Comm->op_begin()+i);
3892        NewOps.push_back(OpAtScope);
3893
3894        for (++i; i != e; ++i) {
3895          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3896          NewOps.push_back(OpAtScope);
3897        }
3898        if (isa<SCEVAddExpr>(Comm))
3899          return getAddExpr(NewOps);
3900        if (isa<SCEVMulExpr>(Comm))
3901          return getMulExpr(NewOps);
3902        if (isa<SCEVSMaxExpr>(Comm))
3903          return getSMaxExpr(NewOps);
3904        if (isa<SCEVUMaxExpr>(Comm))
3905          return getUMaxExpr(NewOps);
3906        llvm_unreachable("Unknown commutative SCEV type!");
3907      }
3908    }
3909    // If we got here, all operands are loop invariant.
3910    return Comm;
3911  }
3912
3913  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
3914    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
3915    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
3916    if (LHS == Div->getLHS() && RHS == Div->getRHS())
3917      return Div;   // must be loop invariant
3918    return getUDivExpr(LHS, RHS);
3919  }
3920
3921  // If this is a loop recurrence for a loop that does not contain L, then we
3922  // are dealing with the final value computed by the loop.
3923  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
3924    if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
3925      // To evaluate this recurrence, we need to know how many times the AddRec
3926      // loop iterates.  Compute this now.
3927      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
3928      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
3929
3930      // Then, evaluate the AddRec.
3931      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
3932    }
3933    return AddRec;
3934  }
3935
3936  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
3937    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3938    if (Op == Cast->getOperand())
3939      return Cast;  // must be loop invariant
3940    return getZeroExtendExpr(Op, Cast->getType());
3941  }
3942
3943  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
3944    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3945    if (Op == Cast->getOperand())
3946      return Cast;  // must be loop invariant
3947    return getSignExtendExpr(Op, Cast->getType());
3948  }
3949
3950  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
3951    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3952    if (Op == Cast->getOperand())
3953      return Cast;  // must be loop invariant
3954    return getTruncateExpr(Op, Cast->getType());
3955  }
3956
3957  llvm_unreachable("Unknown SCEV type!");
3958  return 0;
3959}
3960
3961/// getSCEVAtScope - This is a convenience function which does
3962/// getSCEVAtScope(getSCEV(V), L).
3963const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
3964  return getSCEVAtScope(getSCEV(V), L);
3965}
3966
3967/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
3968/// following equation:
3969///
3970///     A * X = B (mod N)
3971///
3972/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
3973/// A and B isn't important.
3974///
3975/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
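    ///
    /// For example, with BW = 8 (N = 256), A = 6 and B = 10: D = gcd(6, 256) = 2
    /// and B is divisible by D; A/D = 3 has multiplicative inverse I = 43 modulo
    /// N/D = 128 (3 * 43 = 129 = 1 (mod 128)), so the minimum unsigned root is
    /// X = I * (B/D) mod (N/D) = 43 * 5 mod 128 = 87; indeed 6 * 87 = 522 = 10
    /// (mod 256).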
3976static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
3977                                               ScalarEvolution &SE) {
3978  uint32_t BW = A.getBitWidth();
3979  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
3980  assert(A != 0 && "A must be non-zero.");
3981
3982  // 1. D = gcd(A, N)
3983  //
3984  // The gcd of A and N may have only one prime factor: 2. The number of
3985  // trailing zeros in A is its multiplicity.
3986  uint32_t Mult2 = A.countTrailingZeros();
3987  // D = 2^Mult2
3988
3989  // 2. Check if B is divisible by D.
3990  //
3991  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
3992  // is not less than multiplicity of this prime factor for D.
3993  if (B.countTrailingZeros() < Mult2)
3994    return SE.getCouldNotCompute();
3995
3996  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
3997  // modulo (N / D).
3998  //
3999  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
4000  // bit width during computations.
4001  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
4002  APInt Mod(BW + 1, 0);
4003  Mod.set(BW - Mult2);  // Mod = N / D
4004  APInt I = AD.multiplicativeInverse(Mod);
4005
4006  // 4. Compute the minimum unsigned root of the equation:
4007  // I * (B / D) mod (N / D)
4008  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
4009
4010  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
4011  // bits.
4012  return SE.getConstant(Result.trunc(BW));
4013}
4014
4015/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
4016/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
4017/// might be the same) or two SCEVCouldNotCompute objects.
4018///
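    /// The chrec evaluated at iteration x is L + M*x + N*(x*(x-1)/2), i.e. the
    /// polynomial (N/2)*x^2 + (M - N/2)*x + L, which is why the code below takes
    /// A = N/2, B = M - N/2 and C = L as the polynomial coefficients.
    ///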
4019static std::pair<const SCEV *,const SCEV *>
4020SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
4021  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
4022  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
4023  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
4024  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
4025
4026  // We currently can only solve this if the coefficients are constants.
4027  if (!LC || !MC || !NC) {
4028    const SCEV *CNC = SE.getCouldNotCompute();
4029    return std::make_pair(CNC, CNC);
4030  }
4031
4032  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
4033  const APInt &L = LC->getValue()->getValue();
4034  const APInt &M = MC->getValue()->getValue();
4035  const APInt &N = NC->getValue()->getValue();
4036  APInt Two(BitWidth, 2);
4037  APInt Four(BitWidth, 4);
4038
4039  {
4040    using namespace APIntOps;
4041    const APInt& C = L;
4042    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
4043    // The B coefficient is M-N/2
4044    APInt B(M);
4045    B -= sdiv(N,Two);
4046
4047    // The A coefficient is N/2
4048    APInt A(N.sdiv(Two));
4049
4050    // Compute the B^2-4ac term.
4051    APInt SqrtTerm(B);
4052    SqrtTerm *= B;
4053    SqrtTerm -= Four * (A * C);
4054
4055    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4056    // integer value or else APInt::sqrt() will assert.
4057    APInt SqrtVal(SqrtTerm.sqrt());
4058
4059    // Compute the two solutions for the quadratic formula.
4060    // The divisions must be performed as signed divisions.
4061    APInt NegB(-B);
4062    APInt TwoA( A << 1 );
4063    if (TwoA.isMinValue()) {
4064      const SCEV *CNC = SE.getCouldNotCompute();
4065      return std::make_pair(CNC, CNC);
4066    }
4067
4068    LLVMContext *Context = SE.getContext();
4069
4070    ConstantInt *Solution1 =
4071      Context->getConstantInt((NegB + SqrtVal).sdiv(TwoA));
4072    ConstantInt *Solution2 =
4073      Context->getConstantInt((NegB - SqrtVal).sdiv(TwoA));
4074
4075    return std::make_pair(SE.getConstant(Solution1),
4076                          SE.getConstant(Solution2));
4077  } // end APIntOps scope
4078}
4079
4080/// HowFarToZero - Return the number of times a backedge comparing the specified
4081/// value to zero will execute.  If not computable, return CouldNotCompute.
4082const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
4083  // If the value is a constant, the answer can be computed directly.
4084  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4085    // If the value is already zero, the branch will execute zero times.
4086    if (C->getValue()->isZero()) return C;
4087    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4088  }
4089
4090  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4091  if (!AddRec || AddRec->getLoop() != L)
4092    return getCouldNotCompute();
4093
4094  if (AddRec->isAffine()) {
4095    // If this is an affine expression, the execution count of this branch is
4096    // the minimum unsigned root of the following equation:
4097    //
4098    //     Start + Step*N = 0 (mod 2^BW)
4099    //
4100    // equivalent to:
4101    //
4102    //             Step*N = -Start (mod 2^BW)
4103    //
4104    // where BW is the common bit width of Start and Step.
4105
4106    // Get the initial value for the loop.
4107    const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4108                                       L->getParentLoop());
4109    const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4110                                      L->getParentLoop());
4111
4112    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4113      // For now we handle only constant steps.
4114
4115      // First, handle unitary steps.
4116      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
4117        return getNegativeSCEV(Start);       //   N = -Start (as unsigned)
4118      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
4119        return Start;                           //    N = Start (as unsigned)
4120
4121      // Then, try to solve the above equation provided that Start is constant.
4122      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4123        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4124                                            -StartC->getValue()->getValue(),
4125                                            *this);
4126    }
4127  } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
4128    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4129    // the quadratic equation to solve it.
4130    std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
4131                                                                    *this);
4132    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4133    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4134    if (R1) {
4135#if 0
4136      errs() << "HFTZ: " << *V << " - sol#1: " << *R1
4137             << "  sol#2: " << *R2 << "\n";
4138#endif
4139      // Pick the smallest positive root value.
4140      if (ConstantInt *CB =
4141          dyn_cast<ConstantInt>(Context->getConstantExprICmp(ICmpInst::ICMP_ULT,
4142                                   R1->getValue(), R2->getValue()))) {
4143        if (!CB->getZExtValue())
4144          std::swap(R1, R2);   // R1 is the minimum root now.
4145
4146        // We can only use this value if the chrec ends up with an exact zero
4147        // value at this index.  When solving for "X*X != 5", for example, we
4148        // should not accept a root of 2.
4149        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4150        if (Val->isZero())
4151          return R1;  // We found a quadratic root!
4152      }
4153    }
4154  }
4155
4156  return getCouldNotCompute();
4157}
4158
4159/// HowFarToNonZero - Return the number of times a backedge checking the
4160/// specified value for nonzero will execute.  If not computable, return
4161/// CouldNotCompute.
4162const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4163  // Loops that look like: while (X == 0) are very strange indeed.  We don't
4164  // handle them yet except for the trivial case.  This could be expanded in the
4165  // future as needed.
4166
4167  // If the value is a constant, check to see if it is known to be non-zero
4168  // already.  If so, the backedge will execute zero times.
4169  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4170    if (!C->getValue()->isNullValue())
4171      return getIntegerSCEV(0, C->getType());
4172    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4173  }
4174
4175  // We could implement others, but I really doubt anyone writes loops like
4176  // this, and if they did, they would already be constant folded.
4177  return getCouldNotCompute();
4178}
4179
4180/// getLoopPredecessor - If the given loop's header has exactly one unique
4181/// predecessor outside the loop, return it. Otherwise return null.
4182///
4183BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
4184  BasicBlock *Header = L->getHeader();
4185  BasicBlock *Pred = 0;
4186  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
4187       PI != E; ++PI)
4188    if (!L->contains(*PI)) {
4189      if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4190      Pred = *PI;
4191    }
4192  return Pred;
4193}
4194
4195/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4196/// (which may not be an immediate predecessor) which has exactly one
4197/// successor from which BB is reachable, or null if no such block is
4198/// found.
4199///
4200BasicBlock *
4201ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4202  // If the block has a unique predecessor, then there is no path from the
4203  // predecessor to the block that does not go through the direct edge
4204  // from the predecessor to the block.
4205  if (BasicBlock *Pred = BB->getSinglePredecessor())
4206    return Pred;
4207
4208  // A loop's header is defined to be a block that dominates the loop.
4209  // If the header has a unique predecessor outside the loop, it must be
4210  // a block that has exactly one successor that can reach the loop.
4211  if (Loop *L = LI->getLoopFor(BB))
4212    return getLoopPredecessor(L);
4213
4214  return 0;
4215}
4216
4217/// HasSameValue - SCEV structural equivalence is usually sufficient for
4218/// testing whether two expressions are equal, however for the purposes of
4219/// looking for a condition guarding a loop, it can be useful to be a little
4220/// more general, since a front-end may have replicated the controlling
4221/// expression.
4222///
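    /// For example, two identical instructions that SCEV models only as
    /// SCEVUnknown (such as two "sdiv i32 %x, %y" instructions) compare unequal
    /// as SCEVs, yet Instruction::isIdenticalTo recognizes that they compute
    /// the same value.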
4223static bool HasSameValue(const SCEV *A, const SCEV *B) {
4224  // Quick check to see if they are the same SCEV.
4225  if (A == B) return true;
4226
4227  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4228  // two different instructions with the same value. Check for this case.
4229  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4230    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4231      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4232        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4233          if (AI->isIdenticalTo(BI))
4234            return true;
4235
4236  // Otherwise assume they may have a different value.
4237  return false;
4238}
4239
4240bool ScalarEvolution::isKnownNegative(const SCEV *S) {
4241  return getSignedRange(S).getSignedMax().isNegative();
4242}
4243
4244bool ScalarEvolution::isKnownPositive(const SCEV *S) {
4245  return getSignedRange(S).getSignedMin().isStrictlyPositive();
4246}
4247
4248bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
4249  return !getSignedRange(S).getSignedMin().isNegative();
4250}
4251
4252bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
4253  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
4254}
4255
4256bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
4257  return isKnownNegative(S) || isKnownPositive(S);
4258}
4259
4260bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
4261                                       const SCEV *LHS, const SCEV *RHS) {
4262
4263  if (HasSameValue(LHS, RHS))
4264    return ICmpInst::isTrueWhenEqual(Pred);
4265
4266  switch (Pred) {
4267  default:
4268    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4269    break;
4270  case ICmpInst::ICMP_SGT:
4271    Pred = ICmpInst::ICMP_SLT;
4272    std::swap(LHS, RHS);
4273  case ICmpInst::ICMP_SLT: {
4274    ConstantRange LHSRange = getSignedRange(LHS);
4275    ConstantRange RHSRange = getSignedRange(RHS);
4276    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
4277      return true;
4278    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
4279      return false;
4280
4281    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4282    ConstantRange DiffRange = getUnsignedRange(Diff);
4283    if (isKnownNegative(Diff)) {
4284      if (DiffRange.getUnsignedMax().ult(LHSRange.getUnsignedMin()))
4285        return true;
4286      if (DiffRange.getUnsignedMin().uge(LHSRange.getUnsignedMax()))
4287        return false;
4288    } else if (isKnownPositive(Diff)) {
4289      if (LHSRange.getUnsignedMax().ult(DiffRange.getUnsignedMin()))
4290        return true;
4291      if (LHSRange.getUnsignedMin().uge(DiffRange.getUnsignedMax()))
4292        return false;
4293    }
4294    break;
4295  }
4296  case ICmpInst::ICMP_SGE:
4297    Pred = ICmpInst::ICMP_SLE;
4298    std::swap(LHS, RHS);
4299  case ICmpInst::ICMP_SLE: {
4300    ConstantRange LHSRange = getSignedRange(LHS);
4301    ConstantRange RHSRange = getSignedRange(RHS);
4302    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
4303      return true;
4304    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
4305      return false;
4306
4307    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4308    ConstantRange DiffRange = getUnsignedRange(Diff);
4309    if (isKnownNonPositive(Diff)) {
4310      if (DiffRange.getUnsignedMax().ule(LHSRange.getUnsignedMin()))
4311        return true;
4312      if (DiffRange.getUnsignedMin().ugt(LHSRange.getUnsignedMax()))
4313        return false;
4314    } else if (isKnownNonNegative(Diff)) {
4315      if (LHSRange.getUnsignedMax().ule(DiffRange.getUnsignedMin()))
4316        return true;
4317      if (LHSRange.getUnsignedMin().ugt(DiffRange.getUnsignedMax()))
4318        return false;
4319    }
4320    break;
4321  }
4322  case ICmpInst::ICMP_UGT:
4323    Pred = ICmpInst::ICMP_ULT;
4324    std::swap(LHS, RHS);
4325  case ICmpInst::ICMP_ULT: {
4326    ConstantRange LHSRange = getUnsignedRange(LHS);
4327    ConstantRange RHSRange = getUnsignedRange(RHS);
4328    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
4329      return true;
4330    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
4331      return false;
4332
4333    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4334    ConstantRange DiffRange = getUnsignedRange(Diff);
4335    if (LHSRange.getUnsignedMax().ult(DiffRange.getUnsignedMin()))
4336      return true;
4337    if (LHSRange.getUnsignedMin().uge(DiffRange.getUnsignedMax()))
4338      return false;
4339    break;
4340  }
4341  case ICmpInst::ICMP_UGE:
4342    Pred = ICmpInst::ICMP_ULE;
4343    std::swap(LHS, RHS);
4344  case ICmpInst::ICMP_ULE: {
4345    ConstantRange LHSRange = getUnsignedRange(LHS);
4346    ConstantRange RHSRange = getUnsignedRange(RHS);
4347    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
4348      return true;
4349    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
4350      return false;
4351
4352    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4353    ConstantRange DiffRange = getUnsignedRange(Diff);
4354    if (LHSRange.getUnsignedMax().ule(DiffRange.getUnsignedMin()))
4355      return true;
4356    if (LHSRange.getUnsignedMin().ugt(DiffRange.getUnsignedMax()))
4357      return false;
4358    break;
4359  }
4360  case ICmpInst::ICMP_NE: {
4361    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
4362      return true;
4363    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
4364      return true;
4365
4366    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4367    if (isKnownNonZero(Diff))
4368      return true;
4369    break;
4370  }
4371  case ICmpInst::ICMP_EQ:
4372    break;
4373  }
4374  return false;
4375}
4376
4377/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
4378/// protected by a conditional between LHS and RHS.  This is used to
4379/// to eliminate casts.
4380bool
4381ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
4382                                             ICmpInst::Predicate Pred,
4383                                             const SCEV *LHS, const SCEV *RHS) {
4384  // Interpret a null as meaning no loop, where there is obviously no guard
4385  // (interprocedural conditions notwithstanding).
4386  if (!L) return true;
4387
4388  BasicBlock *Latch = L->getLoopLatch();
4389  if (!Latch)
4390    return false;
4391
4392  BranchInst *LoopContinuePredicate =
4393    dyn_cast<BranchInst>(Latch->getTerminator());
4394  if (!LoopContinuePredicate ||
4395      LoopContinuePredicate->isUnconditional())
4396    return false;
4397
4398  return
4399    isNecessaryCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
4400                    LoopContinuePredicate->getSuccessor(0) != L->getHeader());
4401}
4402
4403/// isLoopGuardedByCond - Test whether entry to the loop is protected
4404/// by a conditional between LHS and RHS.  This is used to help avoid max
4405/// expressions in loop trip counts, and to eliminate casts.
4406bool
4407ScalarEvolution::isLoopGuardedByCond(const Loop *L,
4408                                     ICmpInst::Predicate Pred,
4409                                     const SCEV *LHS, const SCEV *RHS) {
4410  // Interpret a null as meaning no loop, where there is obviously no guard
4411  // (interprocedural conditions notwithstanding).
4412  if (!L) return false;
4413
4414  BasicBlock *Predecessor = getLoopPredecessor(L);
4415  BasicBlock *PredecessorDest = L->getHeader();
4416
4417  // Starting at the loop predecessor, climb up the predecessor chain, as long
4418  // as we can find predecessors that each have a unique successor leading to
4419  // the original header.
4420  for (; Predecessor;
4421       PredecessorDest = Predecessor,
4422       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
4423
4424    BranchInst *LoopEntryPredicate =
4425      dyn_cast<BranchInst>(Predecessor->getTerminator());
4426    if (!LoopEntryPredicate ||
4427        LoopEntryPredicate->isUnconditional())
4428      continue;
4429
4430    if (isNecessaryCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
4431                        LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
4432      return true;
4433  }
4434
4435  return false;
4436}
4437
4438/// isNecessaryCond - Test whether the condition described by Pred, LHS,
4439/// and RHS is a necessary condition for the given Cond value to evaluate
4440/// to true.
4441bool ScalarEvolution::isNecessaryCond(Value *CondValue,
4442                                      ICmpInst::Predicate Pred,
4443                                      const SCEV *LHS, const SCEV *RHS,
4444                                      bool Inverse) {
4445  // Recursively handle And and Or conditions.
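      // If the whole condition is known true, both operands of an And are true,
      // so it suffices for either operand alone to make the desired condition
      // necessary; dually, if it is known false (Inverse), both operands of an
      // Or are false.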
4446  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
4447    if (BO->getOpcode() == Instruction::And) {
4448      if (!Inverse)
4449        return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4450               isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4451    } else if (BO->getOpcode() == Instruction::Or) {
4452      if (Inverse)
4453        return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4454               isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4455    }
4456  }
4457
4458  ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
4459  if (!ICI) return false;
4460
4461  // Now that we've found a conditional branch that dominates the loop, check to
4462  // see if it is the comparison we are looking for.
4463  Value *PreCondLHS = ICI->getOperand(0);
4464  Value *PreCondRHS = ICI->getOperand(1);
4465  ICmpInst::Predicate FoundPred;
4466  if (Inverse)
4467    FoundPred = ICI->getInversePredicate();
4468  else
4469    FoundPred = ICI->getPredicate();
4470
4471  if (FoundPred == Pred)
4472    ; // An exact match.
4473  else if (!ICmpInst::isTrueWhenEqual(FoundPred) && Pred == ICmpInst::ICMP_NE) {
4474    // The actual condition is stronger than needed; it implies NE.
4475    FoundPred = ICmpInst::ICMP_NE;
4476    // NE is symmetric but the original comparison may not be. Swap
4477    // the operands if necessary so that they match below.
4478    if (isa<SCEVConstant>(LHS))
4479      std::swap(PreCondLHS, PreCondRHS);
4480  } else
4481    // Check a few special cases.
4482    switch (FoundPred) {
4483    case ICmpInst::ICMP_UGT:
4484      if (Pred == ICmpInst::ICMP_ULT) {
4485        std::swap(PreCondLHS, PreCondRHS);
4486        FoundPred = ICmpInst::ICMP_ULT;
4487        break;
4488      }
4489      return false;
4490    case ICmpInst::ICMP_SGT:
4491      if (Pred == ICmpInst::ICMP_SLT) {
4492        std::swap(PreCondLHS, PreCondRHS);
4493        FoundPred = ICmpInst::ICMP_SLT;
4494        break;
4495      }
4496      return false;
4497    case ICmpInst::ICMP_NE:
4498      // Expressions like (x >u 0) are often canonicalized to (x != 0),
4499      // so check for this case by checking if the NE is comparing against
4500      // a minimum or maximum constant.
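          // For example, (x != 0) implies (x >u 0) because 0 is the unsigned
          // minimum, and (x != UINT_MAX) implies (x <u UINT_MAX).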
4501      if (!ICmpInst::isTrueWhenEqual(Pred))
4502        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(RHS)) {
4503          const APInt &A = C->getValue()->getValue();
4504          switch (Pred) {
4505          case ICmpInst::ICMP_SLT:
4506            if (A.isMaxSignedValue()) break;
4507            return false;
4508          case ICmpInst::ICMP_SGT:
4509            if (A.isMinSignedValue()) break;
4510            return false;
4511          case ICmpInst::ICMP_ULT:
4512            if (A.isMaxValue()) break;
4513            return false;
4514          case ICmpInst::ICMP_UGT:
4515            if (A.isMinValue()) break;
4516            return false;
4517          default:
4518            return false;
4519          }
4520          FoundPred = Pred;
4521          // NE is symmetric but the original comparison may not be. Swap
4522          // the operands if necessary so that they match below.
4523          if (isa<SCEVConstant>(LHS))
4524            std::swap(PreCondLHS, PreCondRHS);
4525          break;
4526        }
4527      return false;
4528    default:
4529      // We weren't able to reconcile the condition.
4530      return false;
4531    }
4532
4533  assert(Pred == FoundPred && "Conditions were not reconciled!");
4534
4535  // Bail if the ICmp's operands' types are wider than the needed type
4536  // before attempting to call getSCEV on them. This avoids infinite
4537  // recursion, since the analysis of widening casts can require loop
4538  // exit condition information for overflow checking, which would
4539  // lead back here.
4540  if (getTypeSizeInBits(LHS->getType()) <
4541      getTypeSizeInBits(PreCondLHS->getType()))
4542    return false;
4543
4544  const SCEV *FoundLHS = getSCEV(PreCondLHS);
4545  const SCEV *FoundRHS = getSCEV(PreCondRHS);
4546
4547  // Balance the types. The case where FoundLHS' type is wider than
4548  // LHS' type is checked for above.
4549  if (getTypeSizeInBits(LHS->getType()) >
4550      getTypeSizeInBits(FoundLHS->getType())) {
4551    if (CmpInst::isSigned(Pred)) {
4552      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
4553      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
4554    } else {
4555      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
4556      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
4557    }
4558  }
4559
4560  return isNecessaryCondOperands(Pred, LHS, RHS,
4561                                 FoundLHS, FoundRHS) ||
4562         // ~x < ~y --> x > y
4563         isNecessaryCondOperands(Pred, LHS, RHS,
4564                                 getNotSCEV(FoundRHS), getNotSCEV(FoundLHS));
4565}
4566
4567/// isNecessaryCondOperands - Test whether the condition described by Pred,
4568/// LHS, and RHS is a necessary condition for the condition described by
4569/// Pred, FoundLHS, and FoundRHS to evaluate to true.
4570bool
4571ScalarEvolution::isNecessaryCondOperands(ICmpInst::Predicate Pred,
4572                                         const SCEV *LHS, const SCEV *RHS,
4573                                         const SCEV *FoundLHS,
4574                                         const SCEV *FoundRHS) {
4575  switch (Pred) {
4576  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4577  case ICmpInst::ICMP_EQ:
4578  case ICmpInst::ICMP_NE:
4579    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
4580      return true;
4581    break;
4582  case ICmpInst::ICMP_SLT:
4583  case ICmpInst::ICMP_SLE:
4584    if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
4585        isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
4586      return true;
4587    break;
4588  case ICmpInst::ICMP_SGT:
4589  case ICmpInst::ICMP_SGE:
4590    if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
4591        isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
4592      return true;
4593    break;
4594  case ICmpInst::ICMP_ULT:
4595  case ICmpInst::ICMP_ULE:
4596    if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
4597        isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
4598      return true;
4599    break;
4600  case ICmpInst::ICMP_UGT:
4601  case ICmpInst::ICMP_UGE:
4602    if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
4603        isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
4604      return true;
4605    break;
4606  }
4607
4608  return false;
4609}
4610
4611/// getBECount - Subtract the end and start values and divide by the step,
4612/// rounding up, to get the number of times the backedge is executed. Return
4613/// CouldNotCompute if an intermediate computation overflows.
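    ///
    /// For example, with Start = 0, End = 10 and Step = 3, the backedge executes
    /// (10 - 0 + (3-1)) /u 3 = 4 times, once for each induction variable value
    /// 0, 3, 6 and 9 below the limit.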
4614const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
4615                                        const SCEV *End,
4616                                        const SCEV *Step) {
4617  const Type *Ty = Start->getType();
4618  const SCEV *NegOne = getIntegerSCEV(-1, Ty);
4619  const SCEV *Diff = getMinusSCEV(End, Start);
4620  const SCEV *RoundUp = getAddExpr(Step, NegOne);
4621
4622  // Add an adjustment to the difference between End and Start so that
4623  // the division will effectively round up.
4624  const SCEV *Add = getAddExpr(Diff, RoundUp);
4625
4626  // Check Add for unsigned overflow.
4627  // TODO: More sophisticated things could be done here.
4628  const Type *WideTy = Context->getIntegerType(getTypeSizeInBits(Ty) + 1);
4629  const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
4630  const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
4631  const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
4632  if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
4633    return getCouldNotCompute();
4634
4635  return getUDivExpr(Add, Step);
4636}
4637
4638/// HowManyLessThans - Return the number of times a backedge containing the
4639/// specified less-than comparison will execute.  If not computable, return
4640/// CouldNotCompute.
4641ScalarEvolution::BackedgeTakenInfo
4642ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
4643                                  const Loop *L, bool isSigned) {
4644  // Only handle:  "ADDREC < LoopInvariant".
4645  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
4646
4647  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
4648  if (!AddRec || AddRec->getLoop() != L)
4649    return getCouldNotCompute();
4650
4651  if (AddRec->isAffine()) {
4652    // FORNOW: We only support unit strides.
4653    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
4654    const SCEV *Step = AddRec->getStepRecurrence(*this);
4655
4656    // TODO: handle non-constant strides.
4657    const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
4658    if (!CStep || CStep->isZero())
4659      return getCouldNotCompute();
4660    if (CStep->isOne()) {
4661      // With unit stride, the iteration never steps past the limit value.
4662    } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
4663      if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
4664        // Test whether a positive iteration can step past the limit
4665        // value and past the maximum value for its type in a single step.
4666        if (isSigned) {
4667          APInt Max = APInt::getSignedMaxValue(BitWidth);
4668          if ((Max - CStep->getValue()->getValue())
4669                .slt(CLimit->getValue()->getValue()))
4670            return getCouldNotCompute();
4671        } else {
4672          APInt Max = APInt::getMaxValue(BitWidth);
4673          if ((Max - CStep->getValue()->getValue())
4674                .ult(CLimit->getValue()->getValue()))
4675            return getCouldNotCompute();
4676        }
4677      } else
4678        // TODO: handle non-constant limit values below.
4679        return getCouldNotCompute();
4680    } else
4681      // TODO: handle negative strides below.
4682      return getCouldNotCompute();
4683
4684    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
4685    // m.  So, we count the number of iterations in which {n,+,s} < m is true.
4686    // Note that we cannot simply return max(m-n,0)/s because it's not safe to
4687    // treat m-n as signed nor unsigned due to overflow possibility.
4688
4689    // First, we get the value of the LHS in the first iteration: n
4690    const SCEV *Start = AddRec->getOperand(0);
4691
4692    // Determine the minimum constant start value.
4693    const SCEV *MinStart = getConstant(isSigned ?
4694      getSignedRange(Start).getSignedMin() :
4695      getUnsignedRange(Start).getUnsignedMin());
4696
4697    // If we know that the condition is true in order to enter the loop,
4698    // then we know that it will run exactly (m-n)/s times. Otherwise, we
4699    // only know that it will execute (max(m,n)-n)/s times. In both cases,
4700    // the division must round up.
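        //
        // For example, with n = 7, m = 5 and s = 1, if the loop is not known to
        // be entered the backedge executes zero times; End = max(m,n) = 7 gives
        // the correct count (7-7)/1 = 0, whereas (m-n)/s would wrap modulo 2^BW.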
4701    const SCEV *End = RHS;
4702    if (!isLoopGuardedByCond(L,
4703                             isSigned ? ICmpInst::ICMP_SLT :
4704                                        ICmpInst::ICMP_ULT,
4705                             getMinusSCEV(Start, Step), RHS))
4706      End = isSigned ? getSMaxExpr(RHS, Start)
4707                     : getUMaxExpr(RHS, Start);
4708
4709    // Determine the maximum constant end value.
4710    const SCEV *MaxEnd = getConstant(isSigned ?
4711      getSignedRange(End).getSignedMax() :
4712      getUnsignedRange(End).getUnsignedMax());
4713
4714    // Finally, we subtract these two values and divide, rounding up, to get
4715    // the number of times the backedge is executed.
4716    const SCEV *BECount = getBECount(Start, End, Step);
4717
4718    // The maximum backedge count is similar, except using the minimum start
4719    // value and the maximum end value.
4720    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step);
4721
4722    return BackedgeTakenInfo(BECount, MaxBECount);
4723  }
4724
4725  return getCouldNotCompute();
4726}
4727
4728/// getNumIterationsInRange - Return the number of iterations of this loop that
4729/// produce values in the specified constant range.  Another way of looking at
4730/// this is that it returns the first iteration number where the value is not in
4731/// the condition, thus computing the exit count. If the iteration count can't
4732/// be computed, an instance of SCEVCouldNotCompute is returned.
4733const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
4734                                                    ScalarEvolution &SE) const {
4735  if (Range.isFullSet())  // Infinite loop.
4736    return SE.getCouldNotCompute();
4737
4738  // If the start is a non-zero constant, shift the range to simplify things.
4739  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
4740    if (!SC->getValue()->isZero()) {
4741      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
4742      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
4743      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
4744      if (const SCEVAddRecExpr *ShiftedAddRec =
4745            dyn_cast<SCEVAddRecExpr>(Shifted))
4746        return ShiftedAddRec->getNumIterationsInRange(
4747                           Range.subtract(SC->getValue()->getValue()), SE);
4748      // This is strange and shouldn't happen.
4749      return SE.getCouldNotCompute();
4750    }
4751
4752  // The only time we can solve this is when we have all constant indices.
4753  // Otherwise, we cannot determine the overflow conditions.
4754  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
4755    if (!isa<SCEVConstant>(getOperand(i)))
4756      return SE.getCouldNotCompute();
4757
4759  // Okay, at this point we know that all elements of the chrec are constants and
4760  // that the start element is zero.
4761
4762  // First check to see if the range contains zero.  If not, the first
4763  // iteration exits.
4764  unsigned BitWidth = SE.getTypeSizeInBits(getType());
4765  if (!Range.contains(APInt(BitWidth, 0)))
4766    return SE.getIntegerSCEV(0, getType());
4767
4768  if (isAffine()) {
4769    // If this is an affine expression then we have this situation:
4770    //   Solve {0,+,A} in Range  ===  Ax in Range
4771
4772    // We know that zero is in the range.  If A is positive then we know that
4773    // the upper value of the range must be the first possible exit value.
4774    // If A is negative then the lower of the range is the last possible loop
4775    // value.  Also note that we already checked for a full range.
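        //
        // For example, solving {0,+,2} in the range [0,10) gives End = 9 and
        // ExitVal = (9+2)/2 = 5: iterations 0 through 4 produce 0,2,4,6,8 inside
        // the range, and iteration 5 produces 10, the first value outside it.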
4776    APInt One(BitWidth,1);
4777    APInt A     = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
4778    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
4779
4780    // The exit value should be (End+A)/A.
4781    APInt ExitVal = (End + A).udiv(A);
4782    ConstantInt *ExitValue = SE.getContext()->getConstantInt(ExitVal);
4783
4784    // Evaluate at the exit value.  If we really did fall out of the valid
4785    // range, then we computed our trip count, otherwise wrap around or other
4786    // things must have happened.
4787    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
4788    if (Range.contains(Val->getValue()))
4789      return SE.getCouldNotCompute();  // Something strange happened
4790
4791    // Ensure that the previous value is in the range.  This is a sanity check.
4792    assert(Range.contains(
4793           EvaluateConstantChrecAtConstant(this,
4794           SE.getContext()->getConstantInt(ExitVal - One), SE)->getValue()) &&
4795           "Linear scev computation is off in a bad way!");
4796    return SE.getConstant(ExitValue);
4797  } else if (isQuadratic()) {
4798    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
4799    // quadratic equation to solve it.  To do this, we must frame our problem in
4800    // terms of figuring out when zero is crossed, instead of when
4801    // Range.getUpper() is crossed.
4802    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
4803    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
4804    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
4805
4806    // Next, solve the constructed addrec
4807    std::pair<const SCEV *,const SCEV *> Roots =
4808      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
4809    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4810    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4811    if (R1) {
4812      // Pick the smallest positive root value.
4813      if (ConstantInt *CB =
4814          dyn_cast<ConstantInt>(
4815                       SE.getContext()->getConstantExprICmp(ICmpInst::ICMP_ULT,
4816                         R1->getValue(), R2->getValue()))) {
4817        if (!CB->getZExtValue())
4818          std::swap(R1, R2);   // R1 is the minimum root now.
4819
4820        // Make sure the root is not off by one.  The returned iteration should
4821        // not be in the range, but the previous one should be.  When solving
4822        // for "X*X < 5", for example, we should not return a root of 2.
4823        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
4824                                                             R1->getValue(),
4825                                                             SE);
4826        if (Range.contains(R1Val->getValue())) {
4827          // The next iteration must be out of the range...
4828          ConstantInt *NextVal =
4829                 SE.getContext()->getConstantInt(R1->getValue()->getValue()+1);
4830
4831          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4832          if (!Range.contains(R1Val->getValue()))
4833            return SE.getConstant(NextVal);
4834          return SE.getCouldNotCompute();  // Something strange happened
4835        }
4836
4837        // If R1 was not in the range, then it is a good return value.  Make
4838        // sure that R1-1 WAS in the range though, just in case.
4839        ConstantInt *NextVal =
4840                 SE.getContext()->getConstantInt(R1->getValue()->getValue()-1);
4841        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4842        if (Range.contains(R1Val->getValue()))
4843          return R1;
4844        return SE.getCouldNotCompute();  // Something strange happened
4845      }
4846    }
4847  }
4848
4849  return SE.getCouldNotCompute();
4850}
4851
4854//===----------------------------------------------------------------------===//
4855//                   SCEVCallbackVH Class Implementation
4856//===----------------------------------------------------------------------===//
4857
4858void ScalarEvolution::SCEVCallbackVH::deleted() {
4859  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4860  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
4861    SE->ConstantEvolutionLoopExitValue.erase(PN);
4862  if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
4863    SE->ValuesAtScopes.erase(I);
4864  SE->Scalars.erase(getValPtr());
4865  // this now dangles!
4866}
4867
4868void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
4869  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4870
4871  // Forget all the expressions associated with users of the old value,
4872  // so that future queries will recompute the expressions using the new
4873  // value.
4874  SmallVector<User *, 16> Worklist;
4875  SmallPtrSet<User *, 8> Visited;
4876  Value *Old = getValPtr();
4877  bool DeleteOld = false;
4878  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
4879       UI != UE; ++UI)
4880    Worklist.push_back(*UI);
4881  while (!Worklist.empty()) {
4882    User *U = Worklist.pop_back_val();
4883    // Deleting the Old value will cause this to dangle. Postpone
4884    // that until everything else is done.
4885    if (U == Old) {
4886      DeleteOld = true;
4887      continue;
4888    }
4889    if (!Visited.insert(U))
4890      continue;
4891    if (PHINode *PN = dyn_cast<PHINode>(U))
4892      SE->ConstantEvolutionLoopExitValue.erase(PN);
4893    if (Instruction *I = dyn_cast<Instruction>(U))
4894      SE->ValuesAtScopes.erase(I);
4895    SE->Scalars.erase(U);
4896    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
4897         UI != UE; ++UI)
4898      Worklist.push_back(*UI);
4899  }
4900  // Delete the Old value if it (indirectly) references itself.
4901  if (DeleteOld) {
4902    if (PHINode *PN = dyn_cast<PHINode>(Old))
4903      SE->ConstantEvolutionLoopExitValue.erase(PN);
4904    if (Instruction *I = dyn_cast<Instruction>(Old))
4905      SE->ValuesAtScopes.erase(I);
4906    SE->Scalars.erase(Old);
4907    // this now dangles!
4908  }
4909  // this may dangle!
4910}
4911
4912ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
4913  : CallbackVH(V), SE(se) {}
4914
4915//===----------------------------------------------------------------------===//
4916//                   ScalarEvolution Class Implementation
4917//===----------------------------------------------------------------------===//
4918
4919ScalarEvolution::ScalarEvolution()
4920  : FunctionPass(&ID) {
4921}
4922
4923bool ScalarEvolution::runOnFunction(Function &F) {
4924  this->F = &F;
4925  LI = &getAnalysis<LoopInfo>();
4926  TD = getAnalysisIfAvailable<TargetData>();
4927  return false;
4928}
4929
4930void ScalarEvolution::releaseMemory() {
4931  Scalars.clear();
4932  BackedgeTakenCounts.clear();
4933  ConstantEvolutionLoopExitValue.clear();
4934  ValuesAtScopes.clear();
4935  UniqueSCEVs.clear();
4936  SCEVAllocator.Reset();
4937}
4938
4939void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
4940  AU.setPreservesAll();
4941  AU.addRequiredTransitive<LoopInfo>();
4942}
4943
4944bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
4945  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
4946}
4947
4948static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
4949                          const Loop *L) {
4950  // Print all inner loops first.
4951  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4952    PrintLoopInfo(OS, SE, *I);
4953
4954  OS << "Loop " << L->getHeader()->getName() << ": ";
4955
4956  SmallVector<BasicBlock*, 8> ExitBlocks;
4957  L->getExitBlocks(ExitBlocks);
4958  if (ExitBlocks.size() != 1)
4959    OS << "<multiple exits> ";
4960
4961  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
4962    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
4963  } else {
4964    OS << "Unpredictable backedge-taken count. ";
4965  }
4966
4967  OS << "\n";
4968  OS << "Loop " << L->getHeader()->getName() << ": ";
4969
4970  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
4971    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
4972  } else {
4973    OS << "Unpredictable max backedge-taken count. ";
4974  }
4975
4976  OS << "\n";
4977}
4978
4979void ScalarEvolution::print(raw_ostream &OS, const Module* ) const {
4980  // ScalarEvolution's implementation of the print method is to print
4981  // out SCEV values of all instructions that are interesting. Doing
4982  // this potentially causes it to create new SCEV objects though,
4983  // which technically conflicts with the const qualifier. This isn't
4984  // observable from outside the class though, so casting away the
4985  // const isn't dangerous.
4986  ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);
4987
4988  OS << "Classifying expressions for: " << F->getName() << "\n";
4989  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
4990    if (isSCEVable(I->getType())) {
4991      OS << *I << '\n';
4992      OS << "  -->  ";
4993      const SCEV *SV = SE.getSCEV(&*I);
4994      SV->print(OS);
4995
4996      const Loop *L = LI->getLoopFor((*I).getParent());
4997
4998      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
4999      if (AtUse != SV) {
5000        OS << "  -->  ";
5001        AtUse->print(OS);
5002      }
5003
5004      if (L) {
5005        OS << "\t\t" "Exits: ";
5006        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
5007        if (!ExitValue->isLoopInvariant(L)) {
5008          OS << "<<Unknown>>";
5009        } else {
5010          OS << *ExitValue;
5011        }
5012      }
5013
5014      OS << "\n";
5015    }
5016
5017  OS << "Determining loop execution counts for: " << F->getName() << "\n";
5018  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
5019    PrintLoopInfo(OS, &SE, *I);
5020}
5021
5022void ScalarEvolution::print(std::ostream &o, const Module *M) const {
5023  raw_os_ostream OS(o);
5024  print(OS, M);
5025}
5026