ScalarEvolution.cpp revision f9a9a9928cc977970d9852292b1c139074ecf055
//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These classes are reference counted, managed by the SCEVHandle
// class.  We only create one SCEV of a particular shape, so pointer-comparisons
// for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
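//
// As a small illustrative sketch (the loop and names here are hypothetical,
// not taken from this file): the canonical induction variable of
//
//   for (int i = 0; i != n; ++i) { ... }
//
// is a PHI node whose evolution can be classified, so it is represented
// directly as the polynomial recurrence {0,+,1}<loop>; a PHI whose evolution
// cannot be classified is instead wrapped in a SCEVUnknown node.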
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//
SCEV::~SCEV() {}
void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute(const ScalarEvolution* p) :
  SCEV(scCouldNotCompute, p) {}
SCEVCouldNotCompute::~SCEVCouldNotCompute() {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

SCEVHandle SCEVCouldNotCompute::
replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym,
                                  const SCEVHandle &Conc,
                                  ScalarEvolution &SE) const {
  return this;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}


// SCEVConstants - Only allow the creation of one SCEVConstant for any
// particular value.  Don't use a SCEVHandle here, or else the object will
// never be deleted!
static ManagedStatic<std::map<ConstantInt*, SCEVConstant*> > SCEVConstants;


SCEVConstant::~SCEVConstant() {
  SCEVConstants->erase(V);
}

SCEVHandle ScalarEvolution::getConstant(ConstantInt *V) {
  SCEVConstant *&R = (*SCEVConstants)[V];
  if (R == 0) R = new SCEVConstant(V, this);
  return R;
}

SCEVHandle ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(Val));
}

SCEVHandle
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy,
                           const SCEVHandle &op, const Type *ty,
                           const ScalarEvolution* p)
  : SCEV(SCEVTy, p), Op(op), Ty(ty) {}

SCEVCastExpr::~SCEVCastExpr() {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

// SCEVTruncates - Only allow the creation of one SCEVTruncateExpr for any
// particular input.  Don't use a SCEVHandle here, or else the object will
// never be deleted!
static ManagedStatic<std::map<std::pair<const SCEV*, const Type*>,
                     SCEVTruncateExpr*> > SCEVTruncates;

SCEVTruncateExpr::SCEVTruncateExpr(const SCEVHandle &op, const Type *ty,
                                   const ScalarEvolution* p)
  : SCEVCastExpr(scTruncate, op, ty, p) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

SCEVTruncateExpr::~SCEVTruncateExpr() {
  SCEVTruncates->erase(std::make_pair(Op, Ty));
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

// SCEVZeroExtends - Only allow the creation of one SCEVZeroExtendExpr for any
// particular input.  Don't use a SCEVHandle here, or else the object will never
// be deleted!
static ManagedStatic<std::map<std::pair<const SCEV*, const Type*>,
                     SCEVZeroExtendExpr*> > SCEVZeroExtends;

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEVHandle &op, const Type *ty,
                                       const ScalarEvolution* p)
  : SCEVCastExpr(scZeroExtend, op, ty, p) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

SCEVZeroExtendExpr::~SCEVZeroExtendExpr() {
  SCEVZeroExtends->erase(std::make_pair(Op, Ty));
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

// SCEVSignExtends - Only allow the creation of one SCEVSignExtendExpr for any
// particular input.  Don't use a SCEVHandle here, or else the object will never
// be deleted!
static ManagedStatic<std::map<std::pair<const SCEV*, const Type*>,
                     SCEVSignExtendExpr*> > SCEVSignExtends;

SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEVHandle &op, const Type *ty,
                                       const ScalarEvolution* p)
  : SCEVCastExpr(scSignExtend, op, ty, p) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

SCEVSignExtendExpr::~SCEVSignExtendExpr() {
  SCEVSignExtends->erase(std::make_pair(Op, Ty));
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

// SCEVCommExprs - Only allow the creation of one SCEVCommutativeExpr for any
// particular input.  Don't use a SCEVHandle here, or else the object will never
// be deleted!
static ManagedStatic<std::map<std::pair<unsigned, std::vector<const SCEV*> >,
                     SCEVCommutativeExpr*> > SCEVCommExprs;

SCEVCommutativeExpr::~SCEVCommutativeExpr() {
  std::vector<const SCEV*> SCEVOps(Operands.begin(), Operands.end());
  SCEVCommExprs->erase(std::make_pair(getSCEVType(), SCEVOps));
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

SCEVHandle SCEVCommutativeExpr::
replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym,
                                  const SCEVHandle &Conc,
                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SCEVHandle H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<SCEVHandle, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      if (isa<SCEVAddExpr>(this))
        return SE.getAddExpr(NewOps);
      else if (isa<SCEVMulExpr>(this))
        return SE.getMulExpr(NewOps);
      else if (isa<SCEVSMaxExpr>(this))
        return SE.getSMaxExpr(NewOps);
      else if (isa<SCEVUMaxExpr>(this))
        return SE.getUMaxExpr(NewOps);
      else
        assert(0 && "Unknown commutative expr!");
    }
  }
  return this;
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}


// SCEVUDivs - Only allow the creation of one SCEVUDivExpr for any particular
// input.  Don't use a SCEVHandle here, or else the object will never be
// deleted!
static ManagedStatic<std::map<std::pair<const SCEV*, const SCEV*>,
                     SCEVUDivExpr*> > SCEVUDivs;

SCEVUDivExpr::~SCEVUDivExpr() {
  SCEVUDivs->erase(std::make_pair(LHS, RHS));
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

// SCEVAddRecExprs - Only allow the creation of one SCEVAddRecExpr for any
// particular input.  Don't use a SCEVHandle here, or else the object will never
// be deleted!
static ManagedStatic<std::map<std::pair<const Loop *,
                                        std::vector<const SCEV*> >,
                     SCEVAddRecExpr*> > SCEVAddRecExprs;

SCEVAddRecExpr::~SCEVAddRecExpr() {
  std::vector<const SCEV*> SCEVOps(Operands.begin(), Operands.end());
  SCEVAddRecExprs->erase(std::make_pair(L, SCEVOps));
}

SCEVHandle SCEVAddRecExpr::
replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym,
                                  const SCEVHandle &Conc,
                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SCEVHandle H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<SCEVHandle, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      return SE.getAddRecExpr(NewOps, L);
    }
  }
  return this;
}


bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // This recurrence is invariant w.r.t. QueryLoop iff QueryLoop doesn't
  // contain L and if the start is invariant.
  // Add recurrences are never invariant in the function-body (null loop).
  return QueryLoop &&
         !QueryLoop->contains(L->getHeader()) &&
         getOperand(0)->isLoopInvariant(QueryLoop);
}


void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() + ">";
}

// SCEVUnknowns - Only allow the creation of one SCEVUnknown for any particular
// value.  Don't use a SCEVHandle here, or else the object will never be
// deleted!
static ManagedStatic<std::map<Value*, SCEVUnknown*> > SCEVUnknowns;

SCEVUnknown::~SCEVUnknown() { SCEVUnknowns->erase(V); }

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      assert(0 && "Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
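///
/// For example (a hypothetical operand list, used only to illustrate the
/// postcondition): given add operands (x, 3, x), the constant sorts to the
/// front and the two occurrences of x become adjacent, e.g. (3, x, x), so a
/// caller such as getAddExpr can notice the duplicate in a single linear
/// scan and fold x + x into x * 2.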
///
static void GroupByComplexity(SmallVectorImpl<SCEVHandle> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume K > 0.
static SCEVHandle BinomialCoefficient(SCEVHandle It, unsigned K,
                                      ScalarEvolution &SE,
                                      const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula uses less than W + K bits. Also, the first formula requires
  // a division step, whereas this formula only requires multiplies and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
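
  // As a concrete, purely illustrative instance of the scheme above for
  // K = 3 with a W-bit result: K! = 6 = 2^1 * 3, so T = 1 and the odd factor
  // is 3.  The product It*(It-1)*(It-2) is formed at width W+1, divided by
  // 2^T (i.e. 2), truncated back to W bits, and then multiplied by the W-bit
  // multiplicative inverse of 3 to perform the exact division by the odd
  // factor.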

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  SCEVHandle Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    SCEVHandle S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  SCEVHandle DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
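///
/// As a small worked example (with made-up coefficients): for the affine
/// recurrence {5,+,3}, BC(It, 0) = 1 and BC(It, 1) = It, so the value at
/// iteration It is 5 + 3*It; at It = 4 that evaluates to 17.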
///
SCEVHandle SCEVAddRecExpr::evaluateAtIteration(SCEVHandle It,
                                               ScalarEvolution &SE) const {
  SCEVHandle Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    SCEVHandle Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

SCEVHandle ScalarEvolution::getTruncateExpr(const SCEVHandle &Op,
                                            const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getUnknown(
        ConstantExpr::getTrunc(SC->getValue(), Ty));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<SCEVHandle, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  SCEVTruncateExpr *&Result = (*SCEVTruncates)[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVTruncateExpr(Op, Ty, this);
  return Result;
}

SCEVHandle ScalarEvolution::getZeroExtendExpr(const SCEVHandle &Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getUnknown(C);
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
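  //
  // For the example above (an illustration only), the addrec for X is
  // {0,+,1}<loop> over i8 with a maximum backedge-taken count of 99; since
  // zext(0 + 1*99) computed in the doubled width i16 equals
  // zext(0) + zext(1)*zext(99), the check below concludes there is no
  // unsigned overflow and produces {zext(0),+,zext(1)} in the wider type.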
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      SCEVHandle MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        SCEVHandle Start = AR->getStart();
        SCEVHandle Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        SCEVHandle CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        SCEVHandle RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          SCEVHandle ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          SCEVHandle Add = getAddExpr(Start, ZMul);
          SCEVHandle OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 AR->getLoop());

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          SCEVHandle SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  SCEVZeroExtendExpr *&Result = (*SCEVZeroExtends)[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVZeroExtendExpr(Op, Ty, this);
  return Result;
}

SCEVHandle ScalarEvolution::getSignExtendExpr(const SCEVHandle &Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getUnknown(C);
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      SCEVHandle MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        SCEVHandle Start = AR->getStart();
        SCEVHandle Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        SCEVHandle CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        SCEVHandle RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          SCEVHandle SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          SCEVHandle Add = getAddExpr(Start, SMul);
          SCEVHandle OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  SCEVSignExtendExpr *&Result = (*SCEVSignExtends)[std::make_pair(Op, Ty)];
  if (Result == 0) Result = new SCEVSignExtendExpr(Op, Ty, this);
  return Result;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
SCEVHandle ScalarEvolution::getAnyExtendExpr(const SCEVHandle &Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    SCEVHandle NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  SCEVHandle ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  SCEVHandle SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddRecExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddRecExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<SCEVHandle, APInt> &M,
                             SmallVector<SCEVHandle, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<SCEVHandle> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<SCEVHandle, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        SCEVHandle Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<SCEVHandle, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, APInt()));
        if (Pair.second) {
          Pair.first->second = NewScale;
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<SCEVHandle, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], APInt()));
      if (Pair.second) {
        Pair.first->second = Scale;
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      SCEVHandle Two = getIntegerSCEV(2, Ty);
      SCEVHandle Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<SCEVHandle, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<SCEVHandle, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      SCEVHandle Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<SCEVHandle, APInt> M;
    SmallVector<SCEVHandle, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile to
1247      // re-generate the operands list. Group the operands by constant scale,
1248      // to avoid multiplying by the same constant scale multiple times.
1249      std::map<APInt, SmallVector<SCEVHandle, 4>, APIntCompare> MulOpLists;
1250      for (SmallVector<SCEVHandle, 8>::iterator I = NewOps.begin(),
1251           E = NewOps.end(); I != E; ++I)
1252        MulOpLists[M.find(*I)->second].push_back(*I);
1253      // Re-generate the operands list.
1254      Ops.clear();
1255      if (AccumulatedConstant != 0)
1256        Ops.push_back(getConstant(AccumulatedConstant));
1257      for (std::map<APInt, SmallVector<SCEVHandle, 4>, APIntCompare>::iterator I =
1258           MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1259        if (I->first != 0)
1260          Ops.push_back(getMulExpr(getConstant(I->first), getAddExpr(I->second)));
1261      if (Ops.empty())
1262        return getIntegerSCEV(0, Ty);
1263      if (Ops.size() == 1)
1264        return Ops[0];
1265      return getAddExpr(Ops);
1266    }
1267  }
1268
1269  // If we are adding something to a multiply expression, make sure the
1270  // something is not already an operand of the multiply.  If so, merge it into
1271  // the multiply.
1272  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1273    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1274    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1275      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1276      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1277        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
1278          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
1279          SCEVHandle InnerMul = Mul->getOperand(MulOp == 0);
1280          if (Mul->getNumOperands() != 2) {
1281            // If the multiply has more than two operands, we must get the
1282            // Y*Z term.
1283            SmallVector<SCEVHandle, 4> MulOps(Mul->op_begin(), Mul->op_end());
1284            MulOps.erase(MulOps.begin()+MulOp);
1285            InnerMul = getMulExpr(MulOps);
1286          }
1287          SCEVHandle One = getIntegerSCEV(1, Ty);
1288          SCEVHandle AddOne = getAddExpr(InnerMul, One);
1289          SCEVHandle OuterMul = getMulExpr(AddOne, Ops[AddOp]);
1290          if (Ops.size() == 2) return OuterMul;
1291          if (AddOp < Idx) {
1292            Ops.erase(Ops.begin()+AddOp);
1293            Ops.erase(Ops.begin()+Idx-1);
1294          } else {
1295            Ops.erase(Ops.begin()+Idx);
1296            Ops.erase(Ops.begin()+AddOp-1);
1297          }
1298          Ops.push_back(OuterMul);
1299          return getAddExpr(Ops);
1300        }
1301
1302      // Check this multiply against other multiplies being added together.
1303      for (unsigned OtherMulIdx = Idx+1;
1304           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1305           ++OtherMulIdx) {
1306        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1307        // If MulOp occurs in OtherMul, we can fold the two multiplies
1308        // together.
1309        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1310             OMulOp != e; ++OMulOp)
1311          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1312            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1313            SCEVHandle InnerMul1 = Mul->getOperand(MulOp == 0);
1314            if (Mul->getNumOperands() != 2) {
1315              SmallVector<SCEVHandle, 4> MulOps(Mul->op_begin(), Mul->op_end());
1316              MulOps.erase(MulOps.begin()+MulOp);
1317              InnerMul1 = getMulExpr(MulOps);
1318            }
1319            SCEVHandle InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1320            if (OtherMul->getNumOperands() != 2) {
1321              SmallVector<SCEVHandle, 4> MulOps(OtherMul->op_begin(),
1322                                                OtherMul->op_end());
1323              MulOps.erase(MulOps.begin()+OMulOp);
1324              InnerMul2 = getMulExpr(MulOps);
1325            }
1326            SCEVHandle InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1327            SCEVHandle OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1328            if (Ops.size() == 2) return OuterMul;
1329            Ops.erase(Ops.begin()+Idx);
1330            Ops.erase(Ops.begin()+OtherMulIdx-1);
1331            Ops.push_back(OuterMul);
1332            return getAddExpr(Ops);
1333          }
1334      }
1335    }
1336  }
1337
1338  // If there are any add recurrences in the operands list, see if any other
1339  // added values are loop invariant.  If so, we can fold them into the
1340  // recurrence.
1341  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1342    ++Idx;
1343
1344  // Scan over all recurrences, trying to fold loop invariants into them.
1345  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1346    // Scan all of the other operands to this add and add them to the vector if
1347    // they are loop invariant w.r.t. the recurrence.
1348    SmallVector<SCEVHandle, 8> LIOps;
1349    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1350    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1351      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1352        LIOps.push_back(Ops[i]);
1353        Ops.erase(Ops.begin()+i);
1354        --i; --e;
1355      }
1356
1357    // If we found some loop invariants, fold them into the recurrence.
1358    if (!LIOps.empty()) {
1359      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
1360      LIOps.push_back(AddRec->getStart());
1361
1362      SmallVector<SCEVHandle, 4> AddRecOps(AddRec->op_begin(),
1363                                           AddRec->op_end());
1364      AddRecOps[0] = getAddExpr(LIOps);
1365
1366      SCEVHandle NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
1367      // If all of the other operands were loop invariant, we are done.
1368      if (Ops.size() == 1) return NewRec;
1369
1370      // Otherwise, add the folded AddRec to the non-invariant parts.
1371      for (unsigned i = 0;; ++i)
1372        if (Ops[i] == AddRec) {
1373          Ops[i] = NewRec;
1374          break;
1375        }
1376      return getAddExpr(Ops);
1377    }
1378
1379    // Okay, if there weren't any loop invariants to be folded, check to see if
1380    // there are multiple AddRec's with the same loop induction variable being
1381    // added together.  If so, we can fold them.
1382    for (unsigned OtherIdx = Idx+1;
1383         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1384      if (OtherIdx != Idx) {
1385        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1386        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1387          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
1388          SmallVector<SCEVHandle, 4> NewOps(AddRec->op_begin(), AddRec->op_end());
1389          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
1390            if (i >= NewOps.size()) {
1391              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
1392                            OtherAddRec->op_end());
1393              break;
1394            }
1395            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
1396          }
1397          SCEVHandle NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
1398
1399          if (Ops.size() == 2) return NewAddRec;
1400
1401          Ops.erase(Ops.begin()+Idx);
1402          Ops.erase(Ops.begin()+OtherIdx-1);
1403          Ops.push_back(NewAddRec);
1404          return getAddExpr(Ops);
1405        }
1406      }
1407
1408    // Otherwise couldn't fold anything into this recurrence.  Move on to the
1409    // next one.
1410  }
1411
1412  // Okay, it looks like we really DO need an add expr.  Check to see if we
1413  // already have one, otherwise create a new one.
1414  std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
1415  SCEVCommutativeExpr *&Result = (*SCEVCommExprs)[std::make_pair(scAddExpr,
1416                                                                 SCEVOps)];
1417  if (Result == 0) Result = new SCEVAddExpr(Ops, this);
1418  return Result;
1419}
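
// A minimal illustrative sketch of the folds above from a caller's point of
// view (hypothetical helper, not referenced elsewhere in this file).  SE is
// assumed to be an initialized ScalarEvolution instance, Start and Step are
// assumed to be loop-invariant integer SCEVs of the same type, and L is the
// loop of interest.
static SCEVHandle buildFoldedAddSketch(ScalarEvolution &SE,
                                       const SCEVHandle &Start,
                                       const SCEVHandle &Step,
                                       const Loop *L) {
  const Type *Ty = SE.getEffectiveSCEVType(Start->getType());
  SmallVector<SCEVHandle, 4> Ops;
  Ops.push_back(SE.getIntegerSCEV(3, Ty));
  Ops.push_back(SE.getIntegerSCEV(5, Ty));
  Ops.push_back(SE.getAddRecExpr(Start, Step, L));     // {Start,+,Step}
  // getAddExpr folds the constants into 8 and then pulls the loop-invariant
  // 8 into the recurrence's start, yielding {Start+8,+,Step}.
  return SE.getAddExpr(Ops);
}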
1420
1421
1422/// getMulExpr - Get a canonical multiply expression, or something simpler if
1423/// possible.
1424SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVHandle> &Ops) {
1425  assert(!Ops.empty() && "Cannot get empty mul!");
1426#ifndef NDEBUG
1427  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1428    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1429           getEffectiveSCEVType(Ops[0]->getType()) &&
1430           "SCEVMulExpr operand types don't match!");
1431#endif
1432
1433  // Sort by complexity, this groups all similar expression types together.
1434  GroupByComplexity(Ops, LI);
1435
1436  // If there are any constants, fold them together.
1437  unsigned Idx = 0;
1438  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1439
1440    // C1*(C2+V) -> C1*C2 + C1*V
1441    if (Ops.size() == 2)
1442      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1443        if (Add->getNumOperands() == 2 &&
1444            isa<SCEVConstant>(Add->getOperand(0)))
1445          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1446                            getMulExpr(LHSC, Add->getOperand(1)));
1447
1448
1449    ++Idx;
1450    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1451      // We found two constants, fold them together!
1452      ConstantInt *Fold = ConstantInt::get(LHSC->getValue()->getValue() *
1453                                           RHSC->getValue()->getValue());
1454      Ops[0] = getConstant(Fold);
1455      Ops.erase(Ops.begin()+1);  // Erase the folded element
1456      if (Ops.size() == 1) return Ops[0];
1457      LHSC = cast<SCEVConstant>(Ops[0]);
1458    }
1459
1460    // If we are left with a constant one being multiplied, strip it off.
1461    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1462      Ops.erase(Ops.begin());
1463      --Idx;
1464    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1465      // If we have a multiply of zero, it will always be zero.
1466      return Ops[0];
1467    }
1468  }
1469
1470  // Skip over operands that sort before multiply expressions.
1471  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1472    ++Idx;
1473
1474  if (Ops.size() == 1)
1475    return Ops[0];
1476
1477  // If there are mul operands, inline them all into this expression.
1478  if (Idx < Ops.size()) {
1479    bool DeletedMul = false;
1480    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1481      // If we have a mul, expand the mul operands onto the end of the operands
1482      // list.
1483      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
1484      Ops.erase(Ops.begin()+Idx);
1485      DeletedMul = true;
1486    }
1487
1488    // If we deleted at least one mul, we added operands to the end of the list,
1489    // and they are not necessarily sorted.  Recurse to resort and resimplify
1490    // any operands we just acquired.
1491    if (DeletedMul)
1492      return getMulExpr(Ops);
1493  }
1494
1495  // If there are any add recurrences in the operands list, see if any other
1496  // multiplied values are loop invariant.  If so, we can fold them into the
1497  // recurrence.
1498  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1499    ++Idx;
1500
1501  // Scan over all recurrences, trying to fold loop invariants into them.
1502  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1503    // Scan all of the other operands to this mul and add them to the vector if
1504    // they are loop invariant w.r.t. the recurrence.
1505    SmallVector<SCEVHandle, 8> LIOps;
1506    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1507    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1508      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1509        LIOps.push_back(Ops[i]);
1510        Ops.erase(Ops.begin()+i);
1511        --i; --e;
1512      }
1513
1514    // If we found some loop invariants, fold them into the recurrence.
1515    if (!LIOps.empty()) {
1516      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
1517      SmallVector<SCEVHandle, 4> NewOps;
1518      NewOps.reserve(AddRec->getNumOperands());
1519      if (LIOps.size() == 1) {
1520        const SCEV *Scale = LIOps[0];
1521        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1522          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1523      } else {
1524        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
1525          SmallVector<SCEVHandle, 4> MulOps(LIOps.begin(), LIOps.end());
1526          MulOps.push_back(AddRec->getOperand(i));
1527          NewOps.push_back(getMulExpr(MulOps));
1528        }
1529      }
1530
1531      SCEVHandle NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
1532
1533      // If all of the other operands were loop invariant, we are done.
1534      if (Ops.size() == 1) return NewRec;
1535
1536      // Otherwise, multiply the folded AddRec by the non-invariant parts.
1537      for (unsigned i = 0;; ++i)
1538        if (Ops[i] == AddRec) {
1539          Ops[i] = NewRec;
1540          break;
1541        }
1542      return getMulExpr(Ops);
1543    }
1544
1545    // Okay, if there weren't any loop invariants to be folded, check to see if
1546    // there are multiple AddRec's with the same loop induction variable being
1547    // multiplied together.  If so, we can fold them.
1548    for (unsigned OtherIdx = Idx+1;
1549         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1550      if (OtherIdx != Idx) {
1551        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1552        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1553          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
1554          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1555          SCEVHandle NewStart = getMulExpr(F->getStart(), G->getStart());
1557          SCEVHandle B = F->getStepRecurrence(*this);
1558          SCEVHandle D = G->getStepRecurrence(*this);
1559          SCEVHandle NewStep = getAddExpr(getMulExpr(F, D),
1560                                          getMulExpr(G, B),
1561                                          getMulExpr(B, D));
1562          SCEVHandle NewAddRec = getAddRecExpr(NewStart, NewStep,
1563                                               F->getLoop());
1564          if (Ops.size() == 2) return NewAddRec;
1565
1566          Ops.erase(Ops.begin()+Idx);
1567          Ops.erase(Ops.begin()+OtherIdx-1);
1568          Ops.push_back(NewAddRec);
1569          return getMulExpr(Ops);
1570        }
1571      }
1572
1573    // Otherwise couldn't fold anything into this recurrence.  Move on to the
1574    // next one.
1575  }
1576
1577  // Okay, it looks like we really DO need a mul expr.  Check to see if we
1578  // already have one, otherwise create a new one.
1579  std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
1580  SCEVCommutativeExpr *&Result = (*SCEVCommExprs)[std::make_pair(scMulExpr,
1581                                                                 SCEVOps)];
1582  if (Result == 0)
1583    Result = new SCEVMulExpr(Ops, this);
1584  return Result;
1585}
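
// A short worked check of the product-of-recurrences rule used above, reading
// the affine recurrences as F(n) = A + n*B and G(n) = C + n*D and ignoring
// wrapping for the purposes of the sketch:
//
//   F(0)*G(0) = A*C
//   F(n+1)*G(n+1) - F(n)*G(n)
//       = (F(n+1) - F(n))*G(n+1) + F(n)*(G(n+1) - G(n))
//       = B*(G(n) + D) + F(n)*D
//       = F(n)*D + G(n)*B + B*D
//
// which is exactly the start A*C and step {F*D + G*B + B*D} constructed by
// the code above.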
1586
1587/// getUDivExpr - Get a canonical unsigned division expression, or something
1588/// simpler if possible.
1589SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS,
1590                                        const SCEVHandle &RHS) {
1591  assert(getEffectiveSCEVType(LHS->getType()) ==
1592         getEffectiveSCEVType(RHS->getType()) &&
1593         "SCEVUDivExpr operand types don't match!");
1594
1595  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1596    if (RHSC->getValue()->equalsInt(1))
1597      return LHS;                            // X udiv 1 --> X
1598    if (RHSC->isZero())
1599      return getIntegerSCEV(0, LHS->getType()); // value is undefined
1600
1601    // Determine if the division can be folded into the operands of
1602    // its left-hand operand.
1603    // TODO: Generalize this to non-constants by using known-bits information.
1604    const Type *Ty = LHS->getType();
1605    unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1606    unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1607    // For non-power-of-two values, effectively round the value up to the
1608    // nearest power of two.
1609    if (!RHSC->getValue()->getValue().isPowerOf2())
1610      ++MaxShiftAmt;
1611    const IntegerType *ExtTy =
1612      IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
1613    // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
1614    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1615      if (const SCEVConstant *Step =
1616            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1617        if (!Step->getValue()->getValue()
1618              .urem(RHSC->getValue()->getValue()) &&
1619            getZeroExtendExpr(AR, ExtTy) ==
1620            getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1621                          getZeroExtendExpr(Step, ExtTy),
1622                          AR->getLoop())) {
1623          SmallVector<SCEVHandle, 4> Operands;
1624          for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1625            Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1626          return getAddRecExpr(Operands, AR->getLoop());
1627        }
1628    // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1629    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1630      SmallVector<SCEVHandle, 4> Operands;
1631      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1632        Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1633      if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1634        // Find an operand that's safely divisible.
1635        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1636          SCEVHandle Op = M->getOperand(i);
1637          SCEVHandle Div = getUDivExpr(Op, RHSC);
1638          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1639            const SmallVectorImpl<SCEVHandle> &MOperands = M->getOperands();
1640            Operands = SmallVector<SCEVHandle, 4>(MOperands.begin(),
1641                                                  MOperands.end());
1642            Operands[i] = Div;
1643            return getMulExpr(Operands);
1644          }
1645        }
1646    }
1647    // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1648    if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
1649      SmallVector<SCEVHandle, 4> Operands;
1650      for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1651        Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1652      if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1653        Operands.clear();
1654        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1655          SCEVHandle Op = getUDivExpr(A->getOperand(i), RHS);
1656          if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1657            break;
1658          Operands.push_back(Op);
1659        }
1660        if (Operands.size() == A->getNumOperands())
1661          return getAddExpr(Operands);
1662      }
1663    }
1664
1665    // Fold if both operands are constant.
1666    if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1667      Constant *LHSCV = LHSC->getValue();
1668      Constant *RHSCV = RHSC->getValue();
1669      return getUnknown(ConstantExpr::getUDiv(LHSCV, RHSCV));
1670    }
1671  }
1672
1673  SCEVUDivExpr *&Result = (*SCEVUDivs)[std::make_pair(LHS, RHS)];
1674  if (Result == 0) Result = new SCEVUDivExpr(LHS, RHS, this);
1675  return Result;
1676}
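
// Worked examples of the folds above, on 32-bit operands and assuming the
// zero-extension comparisons (the overflow checks) succeed:
//
//   {0,+,4} udiv 2  -->  {0,+,2}     the step is divisible by 2, so the
//                                    division distributes over the recurrence.
//   (8 * %x) udiv 4 -->  2 * %x      8/4 folds exactly, so the division is
//                                    pushed onto that multiply operand.
//   %x udiv %y                       anything that cannot be folded safely
//                                    simply becomes a uniqued SCEVUDivExpr.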
1677
1678
1679/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1680/// Simplify the expression as much as possible.
1681SCEVHandle ScalarEvolution::getAddRecExpr(const SCEVHandle &Start,
1682                               const SCEVHandle &Step, const Loop *L) {
1683  SmallVector<SCEVHandle, 4> Operands;
1684  Operands.push_back(Start);
1685  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1686    if (StepChrec->getLoop() == L) {
1687      Operands.insert(Operands.end(), StepChrec->op_begin(),
1688                      StepChrec->op_end());
1689      return getAddRecExpr(Operands, L);
1690    }
1691
1692  Operands.push_back(Step);
1693  return getAddRecExpr(Operands, L);
1694}
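
// For example, getAddRecExpr(X, {1,+,2}<L>, L) flattens the chrec-valued step
// into the higher-order recurrence {X,+,1,+,2}<L> instead of nesting it, so
// quadratic evolutions keep a single canonical form.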
1695
1696/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1697/// Simplify the expression as much as possible.
1698SCEVHandle ScalarEvolution::getAddRecExpr(SmallVectorImpl<SCEVHandle> &Operands,
1699                                          const Loop *L) {
1700  if (Operands.size() == 1) return Operands[0];
1701#ifndef NDEBUG
1702  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1703    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1704           getEffectiveSCEVType(Operands[0]->getType()) &&
1705           "SCEVAddRecExpr operand types don't match!");
1706#endif
1707
1708  if (Operands.back()->isZero()) {
1709    Operands.pop_back();
1710    return getAddRecExpr(Operands, L);             // {X,+,0}  -->  X
1711  }
1712
1713  // Canonicalize nested AddRecs by nesting them in order of loop depth.
1714  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
1715    const Loop* NestedLoop = NestedAR->getLoop();
1716    if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
1717      SmallVector<SCEVHandle, 4> NestedOperands(NestedAR->op_begin(),
1718                                                NestedAR->op_end());
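      // Pin NestedAR with an extra handle: Operands[0] may hold the last
      // reference to it, and NestedAR must survive the reassignment of
      // Operands[0] below.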
1719      SCEVHandle NestedARHandle(NestedAR);
1720      Operands[0] = NestedAR->getStart();
1721      NestedOperands[0] = getAddRecExpr(Operands, L);
1722      return getAddRecExpr(NestedOperands, NestedLoop);
1723    }
1724  }
1725
1726  std::vector<const SCEV*> SCEVOps(Operands.begin(), Operands.end());
1727  SCEVAddRecExpr *&Result = (*SCEVAddRecExprs)[std::make_pair(L, SCEVOps)];
1728  if (Result == 0) Result = new SCEVAddRecExpr(Operands, L, this);
1729  return Result;
1730}
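
// As an example of the canonicalization above, let L1 be an outer loop and L2
// a loop nested inside it.  The operand list { {A,+,B}<L2>, C } requested for
// L1 is rewritten as {{A,+,C}<L1>,+,B}<L2>: a recurrence over the shallower
// loop is formed from the inner start value and then becomes the start of the
// recurrence over the deeper loop.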
1731
1732SCEVHandle ScalarEvolution::getSMaxExpr(const SCEVHandle &LHS,
1733                                        const SCEVHandle &RHS) {
1734  SmallVector<SCEVHandle, 2> Ops;
1735  Ops.push_back(LHS);
1736  Ops.push_back(RHS);
1737  return getSMaxExpr(Ops);
1738}
1739
1740SCEVHandle
1741ScalarEvolution::getSMaxExpr(SmallVectorImpl<SCEVHandle> &Ops) {
1742  assert(!Ops.empty() && "Cannot get empty smax!");
1743  if (Ops.size() == 1) return Ops[0];
1744#ifndef NDEBUG
1745  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1746    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1747           getEffectiveSCEVType(Ops[0]->getType()) &&
1748           "SCEVSMaxExpr operand types don't match!");
1749#endif
1750
1751  // Sort by complexity, this groups all similar expression types together.
1752  GroupByComplexity(Ops, LI);
1753
1754  // If there are any constants, fold them together.
1755  unsigned Idx = 0;
1756  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1757    ++Idx;
1758    assert(Idx < Ops.size());
1759    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1760      // We found two constants, fold them together!
1761      ConstantInt *Fold = ConstantInt::get(
1762                              APIntOps::smax(LHSC->getValue()->getValue(),
1763                                             RHSC->getValue()->getValue()));
1764      Ops[0] = getConstant(Fold);
1765      Ops.erase(Ops.begin()+1);  // Erase the folded element
1766      if (Ops.size() == 1) return Ops[0];
1767      LHSC = cast<SCEVConstant>(Ops[0]);
1768    }
1769
1770    // If we are left with a constant -inf, strip it off.
1771    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
1772      Ops.erase(Ops.begin());
1773      --Idx;
1774    }
1775  }
1776
1777  if (Ops.size() == 1) return Ops[0];
1778
1779  // Find the first SMax
1780  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
1781    ++Idx;
1782
1783  // Check to see if one of the operands is an SMax. If so, expand its operands
1784  // onto our operand list, and recurse to simplify.
1785  if (Idx < Ops.size()) {
1786    bool DeletedSMax = false;
1787    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
1788      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
1789      Ops.erase(Ops.begin()+Idx);
1790      DeletedSMax = true;
1791    }
1792
1793    if (DeletedSMax)
1794      return getSMaxExpr(Ops);
1795  }
1796
1797  // Okay, check to see if the same value occurs in the operand list twice.  If
1798  // so, delete one.  Since we sorted the list, these values are required to
1799  // be adjacent.
1800  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1801    if (Ops[i] == Ops[i+1]) {      //  X smax Y smax Y  -->  X smax Y
1802      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1803      --i; --e;
1804    }
1805
1806  if (Ops.size() == 1) return Ops[0];
1807
1808  assert(!Ops.empty() && "Reduced smax down to nothing!");
1809
1810  // Okay, it looks like we really DO need an smax expr.  Check to see if we
1811  // already have one, otherwise create a new one.
1812  std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
1813  SCEVCommutativeExpr *&Result = (*SCEVCommExprs)[std::make_pair(scSMaxExpr,
1814                                                                 SCEVOps)];
1815  if (Result == 0) Result = new SCEVSMaxExpr(Ops, this);
1816  return Result;
1817}
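
// A minimal illustrative sketch of the smax folds above (hypothetical helper,
// not referenced elsewhere in this file).  X is assumed to be a non-constant
// integer SCEV; the two constants fold to 7 and the duplicate X is dropped,
// so this returns the same uniqued node as smax(7, X).
static SCEVHandle buildSMaxSketch(ScalarEvolution &SE, const SCEVHandle &X) {
  const Type *Ty = SE.getEffectiveSCEVType(X->getType());
  SmallVector<SCEVHandle, 4> Ops;
  Ops.push_back(X);
  Ops.push_back(SE.getIntegerSCEV(3, Ty));
  Ops.push_back(SE.getIntegerSCEV(7, Ty));
  Ops.push_back(X);
  return SE.getSMaxExpr(Ops);   // smax(3, 7, X, X)  -->  smax(7, X)
}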
1818
1819SCEVHandle ScalarEvolution::getUMaxExpr(const SCEVHandle &LHS,
1820                                        const SCEVHandle &RHS) {
1821  SmallVector<SCEVHandle, 2> Ops;
1822  Ops.push_back(LHS);
1823  Ops.push_back(RHS);
1824  return getUMaxExpr(Ops);
1825}
1826
1827SCEVHandle
1828ScalarEvolution::getUMaxExpr(SmallVectorImpl<SCEVHandle> &Ops) {
1829  assert(!Ops.empty() && "Cannot get empty umax!");
1830  if (Ops.size() == 1) return Ops[0];
1831#ifndef NDEBUG
1832  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1833    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1834           getEffectiveSCEVType(Ops[0]->getType()) &&
1835           "SCEVUMaxExpr operand types don't match!");
1836#endif
1837
1838  // Sort by complexity, this groups all similar expression types together.
1839  GroupByComplexity(Ops, LI);
1840
1841  // If there are any constants, fold them together.
1842  unsigned Idx = 0;
1843  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1844    ++Idx;
1845    assert(Idx < Ops.size());
1846    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1847      // We found two constants, fold them together!
1848      ConstantInt *Fold = ConstantInt::get(
1849                              APIntOps::umax(LHSC->getValue()->getValue(),
1850                                             RHSC->getValue()->getValue()));
1851      Ops[0] = getConstant(Fold);
1852      Ops.erase(Ops.begin()+1);  // Erase the folded element
1853      if (Ops.size() == 1) return Ops[0];
1854      LHSC = cast<SCEVConstant>(Ops[0]);
1855    }
1856
1857    // If we are left with a constant zero, strip it off.
1858    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
1859      Ops.erase(Ops.begin());
1860      --Idx;
1861    }
1862  }
1863
1864  if (Ops.size() == 1) return Ops[0];
1865
1866  // Find the first UMax
1867  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
1868    ++Idx;
1869
1870  // Check to see if one of the operands is a UMax. If so, expand its operands
1871  // onto our operand list, and recurse to simplify.
1872  if (Idx < Ops.size()) {
1873    bool DeletedUMax = false;
1874    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
1875      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
1876      Ops.erase(Ops.begin()+Idx);
1877      DeletedUMax = true;
1878    }
1879
1880    if (DeletedUMax)
1881      return getUMaxExpr(Ops);
1882  }
1883
1884  // Okay, check to see if the same value occurs in the operand list twice.  If
1885  // so, delete one.  Since we sorted the list, these values are required to
1886  // be adjacent.
1887  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1888    if (Ops[i] == Ops[i+1]) {      //  X umax Y umax Y  -->  X umax Y
1889      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1890      --i; --e;
1891    }
1892
1893  if (Ops.size() == 1) return Ops[0];
1894
1895  assert(!Ops.empty() && "Reduced umax down to nothing!");
1896
1897  // Okay, it looks like we really DO need a umax expr.  Check to see if we
1898  // already have one, otherwise create a new one.
1899  std::vector<const SCEV*> SCEVOps(Ops.begin(), Ops.end());
1900  SCEVCommutativeExpr *&Result = (*SCEVCommExprs)[std::make_pair(scUMaxExpr,
1901                                                                 SCEVOps)];
1902  if (Result == 0) Result = new SCEVUMaxExpr(Ops, this);
1903  return Result;
1904}
1905
1906SCEVHandle ScalarEvolution::getSMinExpr(const SCEVHandle &LHS,
1907                                        const SCEVHandle &RHS) {
1908  // ~smax(~x, ~y) == smin(x, y).
1909  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1910}
1911
1912SCEVHandle ScalarEvolution::getUMinExpr(const SCEVHandle &LHS,
1913                                        const SCEVHandle &RHS) {
1914  // ~umax(~x, ~y) == umin(x, y)
1915  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1916}
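
// Why these identities hold: on an n-bit type, ~z is (2^n - 1) - z in the
// unsigned order and (-1) - z in the signed order, so z -> ~z is an
// order-reversing bijection in both orderings.  Applying it to both operands
// therefore turns a max into the complement of the corresponding min:
//
//   ~smax(~x, ~y) == ~(~smin(x, y)) == smin(x, y)
//   ~umax(~x, ~y) == ~(~umin(x, y)) == umin(x, y)
//
// which lets smin/umin reuse the canonical smax/umax nodes rather than adding
// separate expression kinds.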
1917
1918SCEVHandle ScalarEvolution::getUnknown(Value *V) {
1919  if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
1920    return getConstant(CI);
1921  if (isa<ConstantPointerNull>(V))
1922    return getIntegerSCEV(0, V->getType());
1923  SCEVUnknown *&Result = (*SCEVUnknowns)[V];
1924  if (Result == 0) Result = new SCEVUnknown(V, this);
1925  return Result;
1926}
1927
1928//===----------------------------------------------------------------------===//
1929//            Basic SCEV Analysis and PHI Idiom Recognition Code
1930//
1931
1932/// isSCEVable - Test if values of the given type are analyzable within
1933/// the SCEV framework. This primarily includes integer types, and it
1934/// can optionally include pointer types if the ScalarEvolution class
1935/// has access to target-specific information.
1936bool ScalarEvolution::isSCEVable(const Type *Ty) const {
1937  // Integers are always SCEVable.
1938  if (Ty->isInteger())
1939    return true;
1940
1941  // Pointers are SCEVable if TargetData information is available
1942  // to provide pointer size information.
1943  if (isa<PointerType>(Ty))
1944    return TD != NULL;
1945
1946  // Otherwise it's not SCEVable.
1947  return false;
1948}
1949
1950/// getTypeSizeInBits - Return the size in bits of the specified type,
1951/// for which isSCEVable must return true.
1952uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
1953  assert(isSCEVable(Ty) && "Type is not SCEVable!");
1954
1955  // If we have a TargetData, use it!
1956  if (TD)
1957    return TD->getTypeSizeInBits(Ty);
1958
1959  // Otherwise, we support only integer types.
1960  assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
1961  return Ty->getPrimitiveSizeInBits();
1962}
1963
1964/// getEffectiveSCEVType - Return a type with the same bitwidth as
1965/// the given type and which represents how SCEV will treat the given
1966/// type, for which isSCEVable must return true. For pointer types,
1967/// this is the pointer-sized integer type.
1968const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
1969  assert(isSCEVable(Ty) && "Type is not SCEVable!");
1970
1971  if (Ty->isInteger())
1972    return Ty;
1973
1974  assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
1975  return TD->getIntPtrType();
1976}
1977
1978SCEVHandle ScalarEvolution::getCouldNotCompute() {
1979  return CouldNotCompute;
1980}
1981
1982/// hasSCEV - Return true if the SCEV for this value has already been
1983/// computed.
1984bool ScalarEvolution::hasSCEV(Value *V) const {
1985  return Scalars.count(V);
1986}
1987
1988/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
1989/// expression and create a new one.
1990SCEVHandle ScalarEvolution::getSCEV(Value *V) {
1991  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
1992
1993  std::map<SCEVCallbackVH, SCEVHandle>::iterator I = Scalars.find(V);
1994  if (I != Scalars.end()) return I->second;
1995  SCEVHandle S = createSCEV(V);
1996  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
1997  return S;
1998}
1999
2000/// getIntegerSCEV - Given an integer or FP type, create a constant for the
2001/// specified signed integer value and return a SCEV for the constant.
2002SCEVHandle ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
2003  Ty = getEffectiveSCEVType(Ty);
2004  Constant *C;
2005  if (Val == 0)
2006    C = Constant::getNullValue(Ty);
2007  else if (Ty->isFloatingPoint())
2008    C = ConstantFP::get(APFloat(Ty==Type::FloatTy ? APFloat::IEEEsingle :
2009                                APFloat::IEEEdouble, Val));
2010  else
2011    C = ConstantInt::get(Ty, Val);
2012  return getUnknown(C);
2013}
2014
2015/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2016///
2017SCEVHandle ScalarEvolution::getNegativeSCEV(const SCEVHandle &V) {
2018  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2019    return getUnknown(ConstantExpr::getNeg(VC->getValue()));
2020
2021  const Type *Ty = V->getType();
2022  Ty = getEffectiveSCEVType(Ty);
2023  return getMulExpr(V, getConstant(ConstantInt::getAllOnesValue(Ty)));
2024}
2025
2026/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2027SCEVHandle ScalarEvolution::getNotSCEV(const SCEVHandle &V) {
2028  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2029    return getUnknown(ConstantExpr::getNot(VC->getValue()));
2030
2031  const Type *Ty = V->getType();
2032  Ty = getEffectiveSCEVType(Ty);
2033  SCEVHandle AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty));
2034  return getMinusSCEV(AllOnes, V);
2035}
2036
2037/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2038///
2039SCEVHandle ScalarEvolution::getMinusSCEV(const SCEVHandle &LHS,
2040                                         const SCEVHandle &RHS) {
2041  // X - Y --> X + -Y
2042  return getAddExpr(LHS, getNegativeSCEV(RHS));
2043}
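
// These helpers compose their results out of existing node kinds: -V is the
// multiply (-1 * V), ~V is the subtraction (-1 - V), and X - Y is the sum
// X + (-1 * Y).  Because each result is built through the canonicalizing
// getters, simple cases keep folding; for example, getMinusSCEV(X, X) builds
// X + (-1 * X), which the add folding collapses to the constant 0.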
2044
2045/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2046/// input value to the specified type.  If the type must be extended, it is zero
2047/// extended.
2048SCEVHandle
2049ScalarEvolution::getTruncateOrZeroExtend(const SCEVHandle &V,
2050                                         const Type *Ty) {
2051  const Type *SrcTy = V->getType();
2052  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2053         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2054         "Cannot truncate or zero extend with non-integer arguments!");
2055  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2056    return V;  // No conversion
2057  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2058    return getTruncateExpr(V, Ty);
2059  return getZeroExtendExpr(V, Ty);
2060}
2061
2062/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2063/// input value to the specified type.  If the type must be extended, it is sign
2064/// extended.
2065SCEVHandle
2066ScalarEvolution::getTruncateOrSignExtend(const SCEVHandle &V,
2067                                         const Type *Ty) {
2068  const Type *SrcTy = V->getType();
2069  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2070         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2071         "Cannot truncate or zero extend with non-integer arguments!");
2072  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2073    return V;  // No conversion
2074  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2075    return getTruncateExpr(V, Ty);
2076  return getSignExtendExpr(V, Ty);
2077}
2078
2079/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2080/// input value to the specified type.  If the type must be extended, it is zero
2081/// extended.  The conversion must not be narrowing.
2082SCEVHandle
2083ScalarEvolution::getNoopOrZeroExtend(const SCEVHandle &V, const Type *Ty) {
2084  const Type *SrcTy = V->getType();
2085  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2086         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2087         "Cannot noop or zero extend with non-integer arguments!");
2088  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2089         "getNoopOrZeroExtend cannot truncate!");
2090  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2091    return V;  // No conversion
2092  return getZeroExtendExpr(V, Ty);
2093}
2094
2095/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2096/// input value to the specified type.  If the type must be extended, it is sign
2097/// extended.  The conversion must not be narrowing.
2098SCEVHandle
2099ScalarEvolution::getNoopOrSignExtend(const SCEVHandle &V, const Type *Ty) {
2100  const Type *SrcTy = V->getType();
2101  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2102         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2103         "Cannot noop or sign extend with non-integer arguments!");
2104  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2105         "getNoopOrSignExtend cannot truncate!");
2106  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2107    return V;  // No conversion
2108  return getSignExtendExpr(V, Ty);
2109}
2110
2111/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2112/// the input value to the specified type. If the type must be extended,
2113/// it is extended with unspecified bits. The conversion must not be
2114/// narrowing.
2115SCEVHandle
2116ScalarEvolution::getNoopOrAnyExtend(const SCEVHandle &V, const Type *Ty) {
2117  const Type *SrcTy = V->getType();
2118  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2119         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2120         "Cannot noop or any extend with non-integer arguments!");
2121  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2122         "getNoopOrAnyExtend cannot truncate!");
2123  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2124    return V;  // No conversion
2125  return getAnyExtendExpr(V, Ty);
2126}
2127
2128/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2129/// input value to the specified type.  The conversion must not be widening.
2130SCEVHandle
2131ScalarEvolution::getTruncateOrNoop(const SCEVHandle &V, const Type *Ty) {
2132  const Type *SrcTy = V->getType();
2133  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2134         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2135         "Cannot truncate or noop with non-integer arguments!");
2136  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2137         "getTruncateOrNoop cannot extend!");
2138  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2139    return V;  // No conversion
2140  return getTruncateExpr(V, Ty);
2141}
2142
2143/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2144/// the types using zero-extension, and then perform a umax operation
2145/// with them.
2146SCEVHandle ScalarEvolution::getUMaxFromMismatchedTypes(const SCEVHandle &LHS,
2147                                                       const SCEVHandle &RHS) {
2148  SCEVHandle PromotedLHS = LHS;
2149  SCEVHandle PromotedRHS = RHS;
2150
2151  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2152    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2153  else
2154    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2155
2156  return getUMaxExpr(PromotedLHS, PromotedRHS);
2157}
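
// A minimal illustrative sketch (hypothetical helper, not referenced
// elsewhere in this file).  Narrow and Wide are assumed to be integer SCEVs
// with Narrow's type strictly smaller than Wide's; because SCEVs are uniqued,
// the promoted umax is the same node as the one built by hand.
static SCEVHandle buildMismatchedUMaxSketch(ScalarEvolution &SE,
                                            const SCEVHandle &Narrow,
                                            const SCEVHandle &Wide) {
  SCEVHandle Result = SE.getUMaxFromMismatchedTypes(Narrow, Wide);
  assert(Result == SE.getUMaxExpr(SE.getZeroExtendExpr(Narrow,
                                                       Wide->getType()),
                                  Wide) &&
         "Expected the narrow operand to be zero-extended");
  return Result;
}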
2158
2159/// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value for
2160/// the specified instruction and replaces any references to the symbolic value
2161/// SymName with the specified value.  This is used during PHI resolution.
2162void ScalarEvolution::
2163ReplaceSymbolicValueWithConcrete(Instruction *I, const SCEVHandle &SymName,
2164                                 const SCEVHandle &NewVal) {
2165  std::map<SCEVCallbackVH, SCEVHandle>::iterator SI =
2166    Scalars.find(SCEVCallbackVH(I, this));
2167  if (SI == Scalars.end()) return;
2168
2169  SCEVHandle NV =
2170    SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this);
2171  if (NV == SI->second) return;  // No change.
2172
2173  SI->second = NV;       // Update the scalars map!
2174
2175  // Any instruction values that use this instruction might also need to be
2176  // updated!
2177  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
2178       UI != E; ++UI)
2179    ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal);
2180}
2181
2182/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2183/// a loop header, making it a potential recurrence, or it doesn't.
2184///
2185SCEVHandle ScalarEvolution::createNodeForPHI(PHINode *PN) {
2186  if (PN->getNumIncomingValues() == 2)  // The loops have been canonicalized.
2187    if (const Loop *L = LI->getLoopFor(PN->getParent()))
2188      if (L->getHeader() == PN->getParent()) {
2189        // If it lives in the loop header, it has two incoming values, one
2190        // from outside the loop, and one from inside.
2191        unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2192        unsigned BackEdge     = IncomingEdge^1;
2193
2194        // While we are analyzing this PHI node, handle its value symbolically.
2195        SCEVHandle SymbolicName = getUnknown(PN);
2196        assert(Scalars.find(PN) == Scalars.end() &&
2197               "PHI node already processed?");
2198        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2199
2200        // Using this symbolic name for the PHI, analyze the value coming around
2201        // the back-edge.
2202        SCEVHandle BEValue = getSCEV(PN->getIncomingValue(BackEdge));
2203
2204        // NOTE: If BEValue is loop invariant, we know that the PHI node just
2205        // has a special value for the first iteration of the loop.
2206
2207        // If the value coming around the backedge is an add with the symbolic
2208        // value we just inserted, then we found a simple induction variable!
2209        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2210          // If there is a single occurrence of the symbolic value, replace it
2211          // with a recurrence.
2212          unsigned FoundIndex = Add->getNumOperands();
2213          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2214            if (Add->getOperand(i) == SymbolicName)
2215              if (FoundIndex == e) {
2216                FoundIndex = i;
2217                break;
2218              }
2219
2220          if (FoundIndex != Add->getNumOperands()) {
2221            // Create an add with everything but the specified operand.
2222            SmallVector<SCEVHandle, 8> Ops;
2223            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2224              if (i != FoundIndex)
2225                Ops.push_back(Add->getOperand(i));
2226            SCEVHandle Accum = getAddExpr(Ops);
2227
2228            // This is not a valid addrec if the step amount is varying each
2229            // loop iteration, but is not itself an addrec in this loop.
2230            if (Accum->isLoopInvariant(L) ||
2231                (isa<SCEVAddRecExpr>(Accum) &&
2232                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2233              SCEVHandle StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2234              SCEVHandle PHISCEV  = getAddRecExpr(StartVal, Accum, L);
2235
2236              // Okay, for the entire analysis of this edge we assumed the PHI
2237              // to be symbolic.  We now need to go back and update all of the
2238              // entries for the scalars that use the PHI (except for the PHI
2239              // itself) to use the new analyzed value instead of the "symbolic"
2240              // value.
2241              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2242              return PHISCEV;
2243            }
2244          }
2245        } else if (const SCEVAddRecExpr *AddRec =
2246                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
2247          // Otherwise, this could be a loop like this:
2248          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2249          // In this case, j = {1,+,1}  and BEValue is j.
2250          // Because the other incoming value of i (0) fits the evolution of
2251          // BEValue, i really is an addrec evolution.
2252          if (AddRec->getLoop() == L && AddRec->isAffine()) {
2253            SCEVHandle StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2254
2255            // If StartVal = j.start - j.stride, we can use StartVal as the
2256            // initial value of the addrec evolution.
2257            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2258                                            AddRec->getOperand(1))) {
2259              SCEVHandle PHISCEV =
2260                 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2261
2262              // Okay, for the entire analysis of this edge we assumed the PHI
2263              // to be symbolic.  We now need to go back and update all of the
2264              // entries for the scalars that use the PHI (except for the PHI
2265              // itself) to use the new analyzed value instead of the "symbolic"
2266              // value.
2267              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2268              return PHISCEV;
2269            }
2270          }
2271        }
2272
2273        return SymbolicName;
2274      }
2275
2276  // If it's not a loop phi, we can't handle it yet.
2277  return getUnknown(PN);
2278}
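
// As a concrete example of the recurrence case above, the canonical loop
//
//   loop:
//     %i      = phi i32 [ 0, %entry ], [ %i.next, %loop ]
//     %i.next = add i32 %i, 1
//
// analyzes the back-edge value as (symbolic-%i + 1); stripping the single
// symbolic occurrence leaves the loop-invariant accumulator 1, so %i becomes
// the add recurrence {0,+,1} for this loop, and every cached user of the
// symbolic value is then rewritten to the concrete recurrence.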
2279
2280/// createNodeForGEP - Expand GEP instructions into add and multiply
2281/// operations. This allows them to be analyzed by regular SCEV code.
2282///
2283SCEVHandle ScalarEvolution::createNodeForGEP(User *GEP) {
2284
2285  const Type *IntPtrTy = TD->getIntPtrType();
2286  Value *Base = GEP->getOperand(0);
2287  // Don't attempt to analyze GEPs over unsized objects.
2288  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2289    return getUnknown(GEP);
2290  SCEVHandle TotalOffset = getIntegerSCEV(0, IntPtrTy);
2291  gep_type_iterator GTI = gep_type_begin(GEP);
2292  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2293                                      E = GEP->op_end();
2294       I != E; ++I) {
2295    Value *Index = *I;
2296    // Compute the (potentially symbolic) offset in bytes for this index.
2297    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2298      // For a struct, add the member offset.
2299      const StructLayout &SL = *TD->getStructLayout(STy);
2300      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2301      uint64_t Offset = SL.getElementOffset(FieldNo);
2302      TotalOffset = getAddExpr(TotalOffset,
2303                                  getIntegerSCEV(Offset, IntPtrTy));
2304    } else {
2305      // For an array, add the element offset, explicitly scaled.
2306      SCEVHandle LocalOffset = getSCEV(Index);
2307      if (!isa<PointerType>(LocalOffset->getType()))
2308        // Getelementptr indices are signed.
2309        LocalOffset = getTruncateOrSignExtend(LocalOffset,
2310                                              IntPtrTy);
2311      LocalOffset =
2312        getMulExpr(LocalOffset,
2313                   getIntegerSCEV(TD->getTypeAllocSize(*GTI),
2314                                  IntPtrTy));
2315      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2316    }
2317  }
2318  return getAddExpr(getSCEV(Base), TotalOffset);
2319}
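
// A worked example of the expansion above, assuming TargetData describes a
// target with 32-bit pointers and 4-byte, 4-byte-aligned i32, for the
// (hypothetical) instruction
//
//   getelementptr { i32, [10 x i32] }* %p, i32 0, i32 1, i16 %k
//
// the leading index contributes 0, the struct field index contributes the
// constant offset 4, and the array index contributes 4 * (sext i16 %k), so
// the node returned is  %p + 4 + 4 * (sext %k)  with all offset arithmetic
// performed in the pointer-sized integer type.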
2320
2321/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2322/// guaranteed to end in (at every loop iteration).  It is, at the same time,
2323/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2324/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
2325uint32_t
2326ScalarEvolution::GetMinTrailingZeros(const SCEVHandle &S) {
2327  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2328    return C->getValue()->getValue().countTrailingZeros();
2329
2330  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2331    return std::min(GetMinTrailingZeros(T->getOperand()),
2332                    (uint32_t)getTypeSizeInBits(T->getType()));
2333
2334  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2335    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2336    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2337             getTypeSizeInBits(E->getType()) : OpRes;
2338  }
2339
2340  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2341    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2342    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2343             getTypeSizeInBits(E->getType()) : OpRes;
2344  }
2345
2346  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2347    // The result is the minimum of all the operands' results.
2348    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2349    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2350      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2351    return MinOpRes;
2352  }
2353
2354  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2355    // The result is the sum of all the operands' results.
2356    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2357    uint32_t BitWidth = getTypeSizeInBits(M->getType());
2358    for (unsigned i = 1, e = M->getNumOperands();
2359         SumOpRes != BitWidth && i != e; ++i)
2360      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2361                          BitWidth);
2362    return SumOpRes;
2363  }
2364
2365  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2366    // The result is the minimum of all the operands' results.
2367    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2368    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2369      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2370    return MinOpRes;
2371  }
2372
2373  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2374    // The result is the minimum of all the operands' results.
2375    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2376    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2377      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2378    return MinOpRes;
2379  }
2380
2381  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2382    // The result is the minimum of all the operands' results.
2383    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2384    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2385      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2386    return MinOpRes;
2387  }
2388
2389  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2390    // For a SCEVUnknown, ask ValueTracking.
2391    unsigned BitWidth = getTypeSizeInBits(U->getType());
2392    APInt Mask = APInt::getAllOnesValue(BitWidth);
2393    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2394    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
2395    return Zeros.countTrailingOnes();
2396  }
2397
2398  // SCEVUDivExpr: conservatively, no trailing zero bits are guaranteed.
2399  return 0;
2400}
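
// Two small worked cases for the rules above: if one operand is known to be a
// multiple of 4 (two trailing zeros) and another a multiple of 16 (four),
// their sum is only guaranteed to be a multiple of 4 (min(2, 4) = 2), while
// their product is guaranteed to be a multiple of 64 (2 + 4 = 6).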
2401
2402uint32_t
2403ScalarEvolution::GetMinLeadingZeros(const SCEVHandle &S) {
2404  // TODO: Handle other SCEV expression types here.
2405
2406  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2407    return C->getValue()->getValue().countLeadingZeros();
2408
2409  if (const SCEVZeroExtendExpr *C = dyn_cast<SCEVZeroExtendExpr>(S)) {
2410    // A zero-extension cast adds zero bits.
2411    return GetMinLeadingZeros(C->getOperand()) +
2412           (getTypeSizeInBits(C->getType()) -
2413            getTypeSizeInBits(C->getOperand()->getType()));
2414  }
2415
2416  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2417    // For a SCEVUnknown, ask ValueTracking.
2418    unsigned BitWidth = getTypeSizeInBits(U->getType());
2419    APInt Mask = APInt::getAllOnesValue(BitWidth);
2420    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2421    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2422    return Zeros.countLeadingOnes();
2423  }
2424
2425  return 0;  // Conservatively, no leading zero bits can be guaranteed.
2426}
2427
2428uint32_t
2429ScalarEvolution::GetMinSignBits(const SCEVHandle &S) {
2430  // TODO: Handle other SCEV expression types here.
2431
2432  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
2433    const APInt &A = C->getValue()->getValue();
2434    return A.isNegative() ? A.countLeadingOnes() :
2435                            A.countLeadingZeros();
2436  }
2437
2438  if (const SCEVSignExtendExpr *C = dyn_cast<SCEVSignExtendExpr>(S)) {
2439    // A sign-extension cast adds sign bits.
2440    return GetMinSignBits(C->getOperand()) +
2441           (getTypeSizeInBits(C->getType()) -
2442            getTypeSizeInBits(C->getOperand()->getType()));
2443  }
2444
2445  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2446    // For a SCEVUnknown, ask ValueTracking.
2447    return ComputeNumSignBits(U->getValue(), TD);
2448  }
2449
2450  return 1;
2451}
2452
2453/// createSCEV - We know that there is no SCEV for the specified value.
2454/// Analyze the expression.
2455///
2456SCEVHandle ScalarEvolution::createSCEV(Value *V) {
2457  if (!isSCEVable(V->getType()))
2458    return getUnknown(V);
2459
2460  unsigned Opcode = Instruction::UserOp1;
2461  if (Instruction *I = dyn_cast<Instruction>(V))
2462    Opcode = I->getOpcode();
2463  else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
2464    Opcode = CE->getOpcode();
2465  else
2466    return getUnknown(V);
2467
2468  User *U = cast<User>(V);
2469  switch (Opcode) {
2470  case Instruction::Add:
2471    return getAddExpr(getSCEV(U->getOperand(0)),
2472                      getSCEV(U->getOperand(1)));
2473  case Instruction::Mul:
2474    return getMulExpr(getSCEV(U->getOperand(0)),
2475                      getSCEV(U->getOperand(1)));
2476  case Instruction::UDiv:
2477    return getUDivExpr(getSCEV(U->getOperand(0)),
2478                       getSCEV(U->getOperand(1)));
2479  case Instruction::Sub:
2480    return getMinusSCEV(getSCEV(U->getOperand(0)),
2481                        getSCEV(U->getOperand(1)));
2482  case Instruction::And:
2483    // For an expression like x&255 that merely masks off the high bits,
2484    // use zext(trunc(x)) as the SCEV expression.
2485    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2486      if (CI->isNullValue())
2487        return getSCEV(U->getOperand(1));
2488      if (CI->isAllOnesValue())
2489        return getSCEV(U->getOperand(0));
2490      const APInt &A = CI->getValue();
2491
2492      // Instcombine's ShrinkDemandedConstant may strip bits out of
2493      // constants, obscuring what would otherwise be a low-bits mask.
2494      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
2495      // knew about to reconstruct a low-bits mask value.
2496      unsigned LZ = A.countLeadingZeros();
2497      unsigned BitWidth = A.getBitWidth();
2498      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
2499      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2500      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
2501
2502      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
2503
2504      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
2505        return
2506          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
2507                                            IntegerType::get(BitWidth - LZ)),
2508                            U->getType());
2509    }
2510    break;
2511
2512  case Instruction::Or:
2513    // If the RHS of the Or is a constant, we may have something like:
2514    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
2515    // optimizations will transparently handle this case.
2516    //
2517    // In order for this transformation to be safe, the LHS must be of the
2518    // form X*(2^n) and the Or constant must be less than 2^n.
2519    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2520      SCEVHandle LHS = getSCEV(U->getOperand(0));
2521      const APInt &CIVal = CI->getValue();
2522      if (GetMinTrailingZeros(LHS) >=
2523          (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
2524        return getAddExpr(LHS, getSCEV(U->getOperand(1)));
2525    }
2526    break;
2527  case Instruction::Xor:
2528    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2529      // If the RHS of the xor is a signbit, then this is just an add.
2530      // Instcombine turns add of signbit into xor as a strength reduction step.
2531      if (CI->getValue().isSignBit())
2532        return getAddExpr(getSCEV(U->getOperand(0)),
2533                          getSCEV(U->getOperand(1)));
2534
2535      // If the RHS of xor is -1, then this is a not operation.
2536      if (CI->isAllOnesValue())
2537        return getNotSCEV(getSCEV(U->getOperand(0)));
2538
2539      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
2540      // This is a variant of the check for xor with -1, and it handles
2541      // the case where instcombine has trimmed non-demanded bits out
2542      // of an xor with -1.
2543      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
2544        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
2545          if (BO->getOpcode() == Instruction::And &&
2546              LCI->getValue() == CI->getValue())
2547            if (const SCEVZeroExtendExpr *Z =
2548                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
2549              const Type *UTy = U->getType();
2550              SCEVHandle Z0 = Z->getOperand();
2551              const Type *Z0Ty = Z0->getType();
2552              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
2553
2554              // If C is a low-bits mask, the zero extend is serving to
2555              // mask off the high bits. Complement the operand and
2556              // re-apply the zext.
2557              if (APIntOps::isMask(Z0TySize, CI->getValue()))
2558                return getZeroExtendExpr(getNotSCEV(Z0), UTy);
2559
2560              // If C is a single bit, it may be in the sign-bit position
2561              // before the zero-extend. In this case, represent the xor
2562              // using an add, which is equivalent, and re-apply the zext.
2563              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
2564              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
2565                  Trunc.isSignBit())
2566                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
2567                                         UTy);
2568            }
2569    }
2570    break;
2571
2572  case Instruction::Shl:
2573    // Turn a shift left by a constant amount into a multiply.
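    // For example (illustrative only), "shl i32 %x, 3" becomes the SCEV
    // 8 * %x, since shifting left by 3 multiplies by 2^3.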
2574    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2575      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2576      Constant *X = ConstantInt::get(
2577        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2578      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2579    }
2580    break;
2581
2582  case Instruction::LShr:
2583    // Turn a logical shift right by a constant amount into an unsigned divide.
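    // For example (illustrative only), "lshr i32 %x, 3" becomes the SCEV
    // %x /u 8, since a logical shift right by 3 divides (unsigned) by 2^3.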
2584    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2585      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2586      Constant *X = ConstantInt::get(
2587        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2588      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2589    }
2590    break;
2591
2592  case Instruction::AShr:
2593    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
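    // For example (illustrative only), the pair
    //   %t = shl i32 %x, 24
    //   %r = ashr i32 %t, 24
    // is a sign extension of the low 8 bits of %x, and is modeled below as
    // sext(trunc(%x to i8)) back to i32.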
2594    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2595      if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2596        if (L->getOpcode() == Instruction::Shl &&
2597            L->getOperand(1) == U->getOperand(1)) {
2598          unsigned BitWidth = getTypeSizeInBits(U->getType());
2599          uint64_t Amt = BitWidth - CI->getZExtValue();
2600          if (Amt == BitWidth)
2601            return getSCEV(L->getOperand(0));       // shift by zero --> noop
2602          if (Amt > BitWidth)
2603            return getIntegerSCEV(0, U->getType()); // value is undefined
2604          return
2605            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2606                                                      IntegerType::get(Amt)),
2607                                 U->getType());
2608        }
2609    break;
2610
2611  case Instruction::Trunc:
2612    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2613
2614  case Instruction::ZExt:
2615    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2616
2617  case Instruction::SExt:
2618    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2619
2620  case Instruction::BitCast:
2621    // BitCasts are no-op casts so we just eliminate the cast.
2622    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
2623      return getSCEV(U->getOperand(0));
2624    break;
2625
2626  case Instruction::IntToPtr:
2627    if (!TD) break; // Without TD we can't analyze pointers.
2628    return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2629                                   TD->getIntPtrType());
2630
2631  case Instruction::PtrToInt:
2632    if (!TD) break; // Without TD we can't analyze pointers.
2633    return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2634                                   U->getType());
2635
2636  case Instruction::GetElementPtr:
2637    if (!TD) break; // Without TD we can't analyze pointers.
2638    return createNodeForGEP(U);
2639
2640  case Instruction::PHI:
2641    return createNodeForPHI(cast<PHINode>(U));
2642
2643  case Instruction::Select:
2644    // This could be a smax or umax that was lowered earlier.
2645    // Try to recover it.
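    // For example (illustrative only),
    //   select i1 (icmp sgt i32 %x, %y), i32 %x, i32 %y
    // is recognized below as smax(%x, %y); with the select arms swapped it is
    // recognized as smin(%x, %y).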
2646    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
2647      Value *LHS = ICI->getOperand(0);
2648      Value *RHS = ICI->getOperand(1);
2649      switch (ICI->getPredicate()) {
2650      case ICmpInst::ICMP_SLT:
2651      case ICmpInst::ICMP_SLE:
2652        std::swap(LHS, RHS);
2653        // fall through
2654      case ICmpInst::ICMP_SGT:
2655      case ICmpInst::ICMP_SGE:
2656        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2657          return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
2658        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2659          return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
2660        break;
2661      case ICmpInst::ICMP_ULT:
2662      case ICmpInst::ICMP_ULE:
2663        std::swap(LHS, RHS);
2664        // fall through
2665      case ICmpInst::ICMP_UGT:
2666      case ICmpInst::ICMP_UGE:
2667        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2668          return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
2669        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2670          return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
2671        break;
2672      case ICmpInst::ICMP_NE:
2673        // n != 0 ? n : 1  ->  umax(n, 1)
2674        if (LHS == U->getOperand(1) &&
2675            isa<ConstantInt>(U->getOperand(2)) &&
2676            cast<ConstantInt>(U->getOperand(2))->isOne() &&
2677            isa<ConstantInt>(RHS) &&
2678            cast<ConstantInt>(RHS)->isZero())
2679          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
2680        break;
2681      case ICmpInst::ICMP_EQ:
2682        // n == 0 ? 1 : n  ->  umax(n, 1)
2683        if (LHS == U->getOperand(2) &&
2684            isa<ConstantInt>(U->getOperand(1)) &&
2685            cast<ConstantInt>(U->getOperand(1))->isOne() &&
2686            isa<ConstantInt>(RHS) &&
2687            cast<ConstantInt>(RHS)->isZero())
2688          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
2689        break;
2690      default:
2691        break;
2692      }
2693    }
2694
2695  default: // We cannot analyze this expression.
2696    break;
2697  }
2698
2699  return getUnknown(V);
2700}
2701
2702
2703
2704//===----------------------------------------------------------------------===//
2705//                   Iteration Count Computation Code
2706//
2707
2708/// getBackedgeTakenCount - If the specified loop has a predictable
2709/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
2710/// object. The backedge-taken count is the number of times the loop header
2711/// will be branched to from within the loop. This is one less than the
2712/// trip count of the loop, since it does not count the initial entry to the
2713/// header from outside the loop.
2714///
2715/// Note that it is not valid to call this method on a loop without a
2716/// loop-invariant backedge-taken count (see
2717/// hasLoopInvariantBackedgeTakenCount).
2718///
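/// As an illustrative example: a rotated loop of the form
///   do { body(); } while (++i != n);
/// entered with i == 0 and n == 3 runs the body three times but takes the
/// backedge only twice, so its backedge-taken count is 2 and its trip count
/// is 3.
///
/// A minimal usage sketch for a client (assuming a ScalarEvolution &SE is
/// available; the names here are illustrative):
///
///   if (SE.hasLoopInvariantBackedgeTakenCount(L)) {
///     SCEVHandle BTC = SE.getBackedgeTakenCount(L);
///     // BTC should not be a SCEVCouldNotCompute here.
///   }
///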
2719SCEVHandle ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
2720  return getBackedgeTakenInfo(L).Exact;
2721}
2722
2723/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
2724/// return the least SCEV value that is known never to be less than the
2725/// actual backedge taken count.
2726SCEVHandle ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
2727  return getBackedgeTakenInfo(L).Max;
2728}
2729
2730const ScalarEvolution::BackedgeTakenInfo &
2731ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
2732  // Initially insert a CouldNotCompute for this loop. If the insertion
2733  // succeeds, proceed to actually compute a backedge-taken count and
2734  // update the value. The temporary CouldNotCompute value tells SCEV
2735  // code elsewhere that it shouldn't attempt to request a new
2736  // backedge-taken count, which could result in infinite recursion.
2737  std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
2738    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
2739  if (Pair.second) {
2740    BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
2741    if (ItCount.Exact != CouldNotCompute) {
2742      assert(ItCount.Exact->isLoopInvariant(L) &&
2743             ItCount.Max->isLoopInvariant(L) &&
2744             "Computed trip count isn't loop invariant for loop!");
2745      ++NumTripCountsComputed;
2746
2747      // Update the value in the map.
2748      Pair.first->second = ItCount;
2749    } else {
2750      if (ItCount.Max != CouldNotCompute)
2751        // Update the value in the map.
2752        Pair.first->second = ItCount;
2753      if (isa<PHINode>(L->getHeader()->begin()))
2754        // Only count loops that have phi nodes as not being computable.
2755        ++NumTripCountsNotComputed;
2756    }
2757
2758    // Now that we know more about the trip count for this loop, forget any
2759    // existing SCEV values for PHI nodes in this loop since they are only
2760    // conservative estimates made without the benefit
2761    // of trip count information.
2762    if (ItCount.hasAnyInfo())
2763      forgetLoopPHIs(L);
2764  }
2765  return Pair.first->second;
2766}
2767
2768/// forgetLoopBackedgeTakenCount - This method should be called by the
2769/// client when it has changed a loop in a way that may effect
2770/// ScalarEvolution's ability to compute a trip count, or if the loop
2771/// is deleted.
2772void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
2773  BackedgeTakenCounts.erase(L);
2774  forgetLoopPHIs(L);
2775}
2776
2777/// forgetLoopPHIs - Delete the memoized SCEVs associated with the
2778/// PHI nodes in the given loop. This is used when the trip count of
2779/// the loop may have changed.
2780void ScalarEvolution::forgetLoopPHIs(const Loop *L) {
2781  BasicBlock *Header = L->getHeader();
2782
2783  // Push all Loop-header PHIs onto the Worklist stack, except those
2784  // that are presently represented via a SCEVUnknown. SCEVUnknown for
2785  // a PHI either means that it has an unrecognized structure, or it's
2786  // a PHI that's in the process of being computed by createNodeForPHI.
2787  // In the former case, additional loop trip count information isn't
2788  // going to change anything. In the latter case, createNodeForPHI will
2789  // perform the necessary updates on its own when it gets to that point.
2790  SmallVector<Instruction *, 16> Worklist;
2791  for (BasicBlock::iterator I = Header->begin();
2792       PHINode *PN = dyn_cast<PHINode>(I); ++I) {
2793    std::map<SCEVCallbackVH, SCEVHandle>::iterator It = Scalars.find((Value*)I);
2794    if (It != Scalars.end() && !isa<SCEVUnknown>(It->second))
2795      Worklist.push_back(PN);
2796  }
2797
2798  while (!Worklist.empty()) {
2799    Instruction *I = Worklist.pop_back_val();
2800    if (Scalars.erase(I))
2801      for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2802           UI != UE; ++UI)
2803        Worklist.push_back(cast<Instruction>(UI));
2804  }
2805}
2806
2807/// ComputeBackedgeTakenCount - Compute the number of times the backedge
2808/// of the specified loop will execute.
2809ScalarEvolution::BackedgeTakenInfo
2810ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
2811  SmallVector<BasicBlock*, 8> ExitingBlocks;
2812  L->getExitingBlocks(ExitingBlocks);
2813
2814  // Examine all exits and pick the most conservative values.
2815  SCEVHandle BECount = CouldNotCompute;
2816  SCEVHandle MaxBECount = CouldNotCompute;
2817  bool CouldNotComputeBECount = false;
2818  bool CouldNotComputeMaxBECount = false;
2819  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
2820    BackedgeTakenInfo NewBTI =
2821      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
2822
2823    if (NewBTI.Exact == CouldNotCompute) {
2824      // We couldn't compute an exact value for this exit, so
2825      // we won't be able to compute an exact value for the loop.
2826      CouldNotComputeBECount = true;
2827      BECount = CouldNotCompute;
2828    } else if (!CouldNotComputeBECount) {
2829      if (BECount == CouldNotCompute)
2830        BECount = NewBTI.Exact;
2831      else {
2832        // TODO: More analysis could be done here. For example, a
2833        // loop with a short-circuiting && operator has an exact count
2834        // of the min of both sides.
2835        CouldNotComputeBECount = true;
2836        BECount = CouldNotCompute;
2837      }
2838    }
2839    if (NewBTI.Max == CouldNotCompute) {
2840      // We couldn't compute a maximum value for this exit, so
2841      // we won't be able to compute a maximum value for the loop.
2842      CouldNotComputeMaxBECount = true;
2843      MaxBECount = CouldNotCompute;
2844    } else if (!CouldNotComputeMaxBECount) {
2845      if (MaxBECount == CouldNotCompute)
2846        MaxBECount = NewBTI.Max;
2847      else
2848        MaxBECount = getUMaxFromMismatchedTypes(MaxBECount, NewBTI.Max);
2849    }
2850  }
2851
2852  return BackedgeTakenInfo(BECount, MaxBECount);
2853}
2854
2855/// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
2856/// of the specified loop will execute if it exits via the specified block.
2857ScalarEvolution::BackedgeTakenInfo
2858ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
2859                                                   BasicBlock *ExitingBlock) {
2860
2861  // Okay, we've chosen an exiting block.  See what condition causes us to
2862  // exit at this block.
2863  //
2864  // FIXME: we should be able to handle switch instructions (with a single exit)
2865  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2866  if (ExitBr == 0) return CouldNotCompute;
2867  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
2868
2869  // At this point, we know we have a conditional branch that determines whether
2870  // the loop is exited.  However, we don't know if the branch is executed each
2871  // time through the loop.  If not, then the execution count of the branch will
2872  // not be equal to the trip count of the loop.
2873  //
2874  // Currently we check for this by checking to see if the Exit branch goes to
2875  // the loop header.  If so, we know it will always execute the same number of
2876  // times as the loop.  We also handle the case where the exit block *is* the
2877  // loop header.  This is common for un-rotated loops.
2878  //
2879  // If both of those tests fail, walk up the unique predecessor chain to the
2880  // header, stopping if there is an edge that doesn't exit the loop. If the
2881  // header is reached, the execution count of the branch will be equal to the
2882  // trip count of the loop.
2883  //
2884  //  More extensive analysis could be done to handle more cases here.
2885  //
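  // An illustrative example (block names are hypothetical): in a loop shaped
  // header -> A -> B -> latch -> header, where B's conditional branch either
  // exits the loop or falls through to the latch, B's branch does not target
  // the header, but the walk below finds that B's unique predecessor chain
  // (A, then the header) has no other in-loop successors, so B still runs
  // exactly once per iteration and its condition gives the backedge count.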
2886  if (ExitBr->getSuccessor(0) != L->getHeader() &&
2887      ExitBr->getSuccessor(1) != L->getHeader() &&
2888      ExitBr->getParent() != L->getHeader()) {
2889    // The simple checks failed, try climbing the unique predecessor chain
2890    // up to the header.
2891    bool Ok = false;
2892    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
2893      BasicBlock *Pred = BB->getUniquePredecessor();
2894      if (!Pred)
2895        return CouldNotCompute;
2896      TerminatorInst *PredTerm = Pred->getTerminator();
2897      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
2898        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
2899        if (PredSucc == BB)
2900          continue;
2901        // If the predecessor has a successor that isn't BB and isn't
2902        // outside the loop, assume the worst.
2903        if (L->contains(PredSucc))
2904          return CouldNotCompute;
2905      }
2906      if (Pred == L->getHeader()) {
2907        Ok = true;
2908        break;
2909      }
2910      BB = Pred;
2911    }
2912    if (!Ok)
2913      return CouldNotCompute;
2914  }
2915
2916  // Proceed to the next level to examine the exit condition expression.
2917  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
2918                                               ExitBr->getSuccessor(0),
2919                                               ExitBr->getSuccessor(1));
2920}
2921
2922/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
2923/// backedge of the specified loop will execute if its exit condition
2924/// were a conditional branch of ExitCond, TBB, and FBB.
2925ScalarEvolution::BackedgeTakenInfo
2926ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
2927                                                       Value *ExitCond,
2928                                                       BasicBlock *TBB,
2929                                                       BasicBlock *FBB) {
2930  // Check if the controlling expression for this loop is an and or or. In
2931  // such cases, an exact backedge-taken count may be infeasible, but a
2932  // maximum count may still be feasible.
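  // An illustrative example (names are hypothetical): for a loop controlled
  // by "i != n && *p != 0", the exact count would be the minimum of the two
  // sub-counts (see the TODOs below, which do not compute that yet), but if
  // either side has a computable maximum, that maximum still safely bounds
  // the number of backedges taken.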
2933  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
2934    if (BO->getOpcode() == Instruction::And) {
2935      // Recurse on the operands of the and.
2936      BackedgeTakenInfo BTI0 =
2937        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
2938      BackedgeTakenInfo BTI1 =
2939        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
2940      SCEVHandle BECount = CouldNotCompute;
2941      SCEVHandle MaxBECount = CouldNotCompute;
2942      if (L->contains(TBB)) {
2943        // Both conditions must be true for the loop to continue executing.
2944        // Choose the less conservative count.
2945        // TODO: Take the minimum of the exact counts.
2946        if (BTI0.Exact == BTI1.Exact)
2947          BECount = BTI0.Exact;
2948        // TODO: Take the minimum of the maximum counts.
2949        if (BTI0.Max == CouldNotCompute)
2950          MaxBECount = BTI1.Max;
2951        else if (BTI1.Max == CouldNotCompute)
2952          MaxBECount = BTI0.Max;
2953        else if (const SCEVConstant *C0 = dyn_cast<SCEVConstant>(BTI0.Max))
2954          if (const SCEVConstant *C1 = dyn_cast<SCEVConstant>(BTI1.Max))
2955              MaxBECount = getConstant(APIntOps::umin(C0->getValue()->getValue(),
2956                                                      C1->getValue()->getValue()));
2957      } else {
2958        // Both conditions must be true for the loop to exit.
2959        assert(L->contains(FBB) && "Loop block has no successor in loop!");
2960        if (BTI0.Exact != CouldNotCompute && BTI1.Exact != CouldNotCompute)
2961          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
2962        if (BTI0.Max != CouldNotCompute && BTI1.Max != CouldNotCompute)
2963          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
2964      }
2965
2966      return BackedgeTakenInfo(BECount, MaxBECount);
2967    }
2968    if (BO->getOpcode() == Instruction::Or) {
2969      // Recurse on the operands of the or.
2970      BackedgeTakenInfo BTI0 =
2971        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
2972      BackedgeTakenInfo BTI1 =
2973        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
2974      SCEVHandle BECount = CouldNotCompute;
2975      SCEVHandle MaxBECount = CouldNotCompute;
2976      if (L->contains(FBB)) {
2977        // Both conditions must be false for the loop to continue executing.
2978        // Choose the less conservative count.
2979        // TODO: Take the minimum of the exact counts.
2980        if (BTI0.Exact == BTI1.Exact)
2981          BECount = BTI0.Exact;
2982        // TODO: Take the minimum of the maximum counts.
2983        if (BTI0.Max == CouldNotCompute)
2984          MaxBECount = BTI1.Max;
2985        else if (BTI1.Max == CouldNotCompute)
2986          MaxBECount = BTI0.Max;
2987        else if (const SCEVConstant *C0 = dyn_cast<SCEVConstant>(BTI0.Max))
2988          if (const SCEVConstant *C1 = dyn_cast<SCEVConstant>(BTI1.Max))
2989              MaxBECount = getConstant(APIntOps::umin(C0->getValue()->getValue(),
2990                                                      C1->getValue()->getValue()));
2991      } else {
2992        // Both conditions must be false for the loop to exit.
2993        assert(L->contains(TBB) && "Loop block has no successor in loop!");
2994        if (BTI0.Exact != CouldNotCompute && BTI1.Exact != CouldNotCompute)
2995          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
2996        if (BTI0.Max != CouldNotCompute && BTI1.Max != CouldNotCompute)
2997          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
2998      }
2999
3000      return BackedgeTakenInfo(BECount, MaxBECount);
3001    }
3002  }
3003
3004  // With an icmp, it may be feasible to compute an exact backedge-taken count.
3005  // Proceed to the next level to examine the icmp.
3006  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3007    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3008
3009  // If it's not an integer or pointer comparison then compute it the hard way.
3010  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3011}
3012
3013/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3014/// backedge of the specified loop will execute if its exit condition
3015/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
3016ScalarEvolution::BackedgeTakenInfo
3017ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3018                                                           ICmpInst *ExitCond,
3019                                                           BasicBlock *TBB,
3020                                                           BasicBlock *FBB) {
3021
3022  // If the condition was exit-on-true, convert it to exit-on-false.
3023  ICmpInst::Predicate Cond;
3024  if (!L->contains(FBB))
3025    Cond = ExitCond->getPredicate();
3026  else
3027    Cond = ExitCond->getInversePredicate();
3028
3029  // Handle common loops like: for (X = "string"; *X; ++X)
3030  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3031    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3032      SCEVHandle ItCnt =
3033        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3034      if (!isa<SCEVCouldNotCompute>(ItCnt)) {
3035        unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
3036        return BackedgeTakenInfo(ItCnt,
3037                                 isa<SCEVConstant>(ItCnt) ? ItCnt :
3038                                   getConstant(APInt::getMaxValue(BitWidth)-1));
3039      }
3040    }
3041
3042  SCEVHandle LHS = getSCEV(ExitCond->getOperand(0));
3043  SCEVHandle RHS = getSCEV(ExitCond->getOperand(1));
3044
3045  // Try to evaluate any dependencies out of the loop.
3046  LHS = getSCEVAtScope(LHS, L);
3047  RHS = getSCEVAtScope(RHS, L);
3048
3049  // At this point, we would like to compute for how many iterations of the
3050  // loop the predicate will return true for these inputs.
3051  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3052    // If only the LHS is loop-invariant, swap it to the RHS.
3053    std::swap(LHS, RHS);
3054    Cond = ICmpInst::getSwappedPredicate(Cond);
3055  }
3056
3057  // If we have a comparison of a chrec against a constant, try to use value
3058  // ranges to answer this query.
3059  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3060    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3061      if (AddRec->getLoop() == L) {
3062        // Form the constant range.
3063        ConstantRange CompRange(
3064            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3065
3066        SCEVHandle Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3067        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3068      }
3069
3070  switch (Cond) {
3071  case ICmpInst::ICMP_NE: {                     // while (X != Y)
3072    // Convert to: while (X-Y != 0)
3073    SCEVHandle TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3074    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3075    break;
3076  }
3077  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
3078    // Convert to: while (X-Y == 0)
3079    SCEVHandle TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3080    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3081    break;
3082  }
3083  case ICmpInst::ICMP_SLT: {
3084    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3085    if (BTI.hasAnyInfo()) return BTI;
3086    break;
3087  }
3088  case ICmpInst::ICMP_SGT: {
3089    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3090                                             getNotSCEV(RHS), L, true);
3091    if (BTI.hasAnyInfo()) return BTI;
3092    break;
3093  }
3094  case ICmpInst::ICMP_ULT: {
3095    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3096    if (BTI.hasAnyInfo()) return BTI;
3097    break;
3098  }
3099  case ICmpInst::ICMP_UGT: {
3100    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3101                                             getNotSCEV(RHS), L, false);
3102    if (BTI.hasAnyInfo()) return BTI;
3103    break;
3104  }
3105  default:
3106#if 0
3107    errs() << "ComputeBackedgeTakenCount ";
3108    if (ExitCond->getOperand(0)->getType()->isUnsigned())
3109      errs() << "[unsigned] ";
3110    errs() << *LHS << "   "
3111         << Instruction::getOpcodeName(Instruction::ICmp)
3112         << "   " << *RHS << "\n";
3113#endif
3114    break;
3115  }
3116  return
3117    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3118}
3119
3120static ConstantInt *
3121EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3122                                ScalarEvolution &SE) {
3123  SCEVHandle InVal = SE.getConstant(C);
3124  SCEVHandle Val = AddRec->evaluateAtIteration(InVal, SE);
3125  assert(isa<SCEVConstant>(Val) &&
3126         "Evaluation of SCEV at constant didn't fold correctly?");
3127  return cast<SCEVConstant>(Val)->getValue();
3128}
3129
3130/// GetAddressedElementFromGlobal - Given a global variable with an initializer
3131/// and a GEP expression (missing the pointer index) indexing into it, return
3132/// the addressed element of the initializer or null if the index expression is
3133/// invalid.
3134static Constant *
3135GetAddressedElementFromGlobal(GlobalVariable *GV,
3136                              const std::vector<ConstantInt*> &Indices) {
3137  Constant *Init = GV->getInitializer();
3138  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3139    uint64_t Idx = Indices[i]->getZExtValue();
3140    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3141      assert(Idx < CS->getNumOperands() && "Bad struct index!");
3142      Init = cast<Constant>(CS->getOperand(Idx));
3143    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3144      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
3145      Init = cast<Constant>(CA->getOperand(Idx));
3146    } else if (isa<ConstantAggregateZero>(Init)) {
3147      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3148        assert(Idx < STy->getNumElements() && "Bad struct index!");
3149        Init = Constant::getNullValue(STy->getElementType(Idx));
3150      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3151        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
3152        Init = Constant::getNullValue(ATy->getElementType());
3153      } else {
3154        assert(0 && "Unknown constant aggregate type!");
3155        return 0;  // Unknown aggregate type; bail out.
3156      }
3157    } else {
3158      return 0; // Unknown initializer type
3159    }
3160  }
3161  return Init;
3162}
3163
3164/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3165/// 'icmp op load X, cst', try to see if we can compute the backedge
3166/// execution count.
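/// As an illustrative example (the global and loop are hypothetical): for a
/// constant global "str" initialized to "hi" and a loop that continues while
/// str[i] != 0 with i starting at 0 and incrementing by 1, the loop below
/// evaluates the compare at successive index values and finds it first fails
/// at index 2, giving a backedge-taken count of 2.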
3167SCEVHandle ScalarEvolution::
3168ComputeLoadConstantCompareBackedgeTakenCount(LoadInst *LI, Constant *RHS,
3169                                             const Loop *L,
3170                                             ICmpInst::Predicate predicate) {
3171  if (LI->isVolatile()) return CouldNotCompute;
3172
3173  // Check to see if the loaded pointer is a getelementptr of a global.
3174  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3175  if (!GEP) return CouldNotCompute;
3176
3177  // Make sure that it is really a constant global we are gepping, with an
3178  // initializer, and make sure the first IDX is really 0.
3179  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3180  if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
3181      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3182      !cast<Constant>(GEP->getOperand(1))->isNullValue())
3183    return CouldNotCompute;
3184
3185  // Okay, we allow one non-constant index into the GEP instruction.
3186  Value *VarIdx = 0;
3187  std::vector<ConstantInt*> Indexes;
3188  unsigned VarIdxNum = 0;
3189  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3190    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3191      Indexes.push_back(CI);
3192    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
3193      if (VarIdx) return CouldNotCompute;  // Multiple non-constant idx's.
3194      VarIdx = GEP->getOperand(i);
3195      VarIdxNum = i-2;
3196      Indexes.push_back(0);
3197    }
3198
3199  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3200  // Check to see if X is a loop variant variable value now.
3201  SCEVHandle Idx = getSCEV(VarIdx);
3202  Idx = getSCEVAtScope(Idx, L);
3203
3204  // We can only recognize very limited forms of loop index expressions, in
3205  // particular, only affine AddRec's like {C1,+,C2}.
3206  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3207  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3208      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3209      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3210    return CouldNotCompute;
3211
3212  unsigned MaxSteps = MaxBruteForceIterations;
3213  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3214    ConstantInt *ItCst =
3215      ConstantInt::get(cast<IntegerType>(IdxExpr->getType()), IterationNum);
3216    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3217
3218    // Form the GEP offset.
3219    Indexes[VarIdxNum] = Val;
3220
3221    Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
3222    if (Result == 0) break;  // Cannot compute!
3223
3224    // Evaluate the condition for this iteration.
3225    Result = ConstantExpr::getICmp(predicate, Result, RHS);
3226    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
3227    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3228#if 0
3229      errs() << "\n***\n*** Computed loop count " << *ItCst
3230             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3231             << "***\n";
3232#endif
3233      ++NumArrayLenItCounts;
3234      return getConstant(ItCst);   // Found terminating iteration!
3235    }
3236  }
3237  return CouldNotCompute;
3238}
3239
3240
3241/// CanConstantFold - Return true if we can constant fold an instruction of the
3242/// specified type, assuming that all operands were constants.
3243static bool CanConstantFold(const Instruction *I) {
3244  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3245      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3246    return true;
3247
3248  if (const CallInst *CI = dyn_cast<CallInst>(I))
3249    if (const Function *F = CI->getCalledFunction())
3250      return canConstantFoldCallTo(F);
3251  return false;
3252}
3253
3254/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3255/// in the loop that V is derived from.  We allow arbitrary operations along the
3256/// way, but the operands of an operation must either be constants or a value
3257/// derived from a constant PHI.  If this expression does not fit with these
3258/// constraints, return null.
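/// As an illustrative example (names are hypothetical): given
///   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
///   %i.next = add i32 %i, 1
/// the value %i.next is derived from the single header PHI %i through a
/// foldable add with a constant, so this returns %i.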
3259static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3260  // If this is not an instruction, or if this is an instruction outside of the
3261  // loop, it can't be derived from a loop PHI.
3262  Instruction *I = dyn_cast<Instruction>(V);
3263  if (I == 0 || !L->contains(I->getParent())) return 0;
3264
3265  if (PHINode *PN = dyn_cast<PHINode>(I)) {
3266    if (L->getHeader() == I->getParent())
3267      return PN;
3268    else
3269      // We don't currently keep track of the control flow needed to evaluate
3270      // PHIs, so we cannot handle PHIs inside of loops.
3271      return 0;
3272  }
3273
3274  // If we won't be able to constant fold this expression even if the operands
3275  // are constants, return early.
3276  if (!CanConstantFold(I)) return 0;
3277
3278  // Otherwise, we can evaluate this instruction if all of its operands are
3279  // constant or derived from a PHI node themselves.
3280  PHINode *PHI = 0;
3281  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3282    if (!(isa<Constant>(I->getOperand(Op)) ||
3283          isa<GlobalValue>(I->getOperand(Op)))) {
3284      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3285      if (P == 0) return 0;  // Not evolving from PHI
3286      if (PHI == 0)
3287        PHI = P;
3288      else if (PHI != P)
3289        return 0;  // Evolving from multiple different PHIs.
3290    }
3291
3292  // This is an expression evolving from a constant PHI!
3293  return PHI;
3294}
3295
3296/// EvaluateExpression - Given an expression that passes the
3297/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3298/// in the loop has the value PHIVal.  If we can't fold this expression for some
3299/// reason, return null.
3300static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
3301  if (isa<PHINode>(V)) return PHIVal;
3302  if (Constant *C = dyn_cast<Constant>(V)) return C;
3303  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3304  Instruction *I = cast<Instruction>(V);
3305
3306  std::vector<Constant*> Operands;
3307  Operands.resize(I->getNumOperands());
3308
3309  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3310    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
3311    if (Operands[i] == 0) return 0;
3312  }
3313
3314  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3315    return ConstantFoldCompareInstOperands(CI->getPredicate(),
3316                                           &Operands[0], Operands.size());
3317  else
3318    return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3319                                    &Operands[0], Operands.size());
3320}
3321
3322/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
3323/// in the header of its containing loop, we know the loop executes a
3324/// constant number of times, and the PHI node is just a recurrence
3325/// involving constants, fold it.
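/// As an illustrative example (names are hypothetical): for a header PHI
///   %x = phi i32 [ 3, %entry ], [ %x.next, %loop ]
///   %x.next = mul i32 %x, 2
/// with a backedge-taken count of 4, the symbolic execution below produces
/// 3, 6, 12, 24, 48 and returns 48 as the exit value of %x.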
3326Constant *ScalarEvolution::
3327getConstantEvolutionLoopExitValue(PHINode *PN, const APInt& BEs, const Loop *L){
3328  std::map<PHINode*, Constant*>::iterator I =
3329    ConstantEvolutionLoopExitValue.find(PN);
3330  if (I != ConstantEvolutionLoopExitValue.end())
3331    return I->second;
3332
3333  if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
3334    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
3335
3336  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
3337
3338  // Since the loop is canonicalized, the PHI node must have two entries.  One
3339  // entry must be a constant (coming in from outside of the loop), and the
3340  // second must be derived from the same PHI.
3341  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3342  Constant *StartCST =
3343    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3344  if (StartCST == 0)
3345    return RetVal = 0;  // Must be a constant.
3346
3347  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3348  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3349  if (PN2 != PN)
3350    return RetVal = 0;  // Not derived from same PHI.
3351
3352  // Execute the loop symbolically to determine the exit value.
3353  if (BEs.getActiveBits() >= 32)
3354    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
3355
3356  unsigned NumIterations = BEs.getZExtValue(); // must be in range
3357  unsigned IterationNum = 0;
3358  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
3359    if (IterationNum == NumIterations)
3360      return RetVal = PHIVal;  // Got exit value!
3361
3362    // Compute the value of the PHI node for the next iteration.
3363    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3364    if (NextPHI == PHIVal)
3365      return RetVal = NextPHI;  // Stopped evolving!
3366    if (NextPHI == 0)
3367      return 0;        // Couldn't evaluate!
3368    PHIVal = NextPHI;
3369  }
3370}
3371
3372/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
3373/// constant number of times (the condition evolves only from constants),
3374/// try to evaluate a few iterations of the loop until the exit condition
3375/// gets a value of ExitWhen (true or false).  If we cannot evaluate the
3376/// trip count of the loop, return CouldNotCompute.
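/// As an illustrative example (values are hypothetical): for a header PHI
/// starting at 1 and doubling each iteration, with an exit taken once the
/// value exceeds 100 (ExitWhen == true), the evaluation below walks
/// 1, 2, 4, ..., 128 and returns a backedge-taken count of 7, the first
/// iteration number at which the condition becomes true.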
3377SCEVHandle ScalarEvolution::
3378ComputeBackedgeTakenCountExhaustively(const Loop *L, Value *Cond, bool ExitWhen) {
3379  PHINode *PN = getConstantEvolvingPHI(Cond, L);
3380  if (PN == 0) return CouldNotCompute;
3381
3382  // Since the loop is canonicalized, the PHI node must have two entries.  One
3383  // entry must be a constant (coming in from outside of the loop), and the
3384  // second must be derived from the same PHI.
3385  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3386  Constant *StartCST =
3387    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3388  if (StartCST == 0) return CouldNotCompute;  // Must be a constant.
3389
3390  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3391  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3392  if (PN2 != PN) return CouldNotCompute;  // Not derived from same PHI.
3393
3394  // Okay, we find a PHI node that defines the trip count of this loop.  Execute
3395  // the loop symbolically to determine when the condition gets a value of
3396  // "ExitWhen".
3397  unsigned IterationNum = 0;
3398  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
3399  for (Constant *PHIVal = StartCST;
3400       IterationNum != MaxIterations; ++IterationNum) {
3401    ConstantInt *CondVal =
3402      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
3403
3404    // Couldn't symbolically evaluate.
3405    if (!CondVal) return CouldNotCompute;
3406
3407    if (CondVal->getValue() == uint64_t(ExitWhen)) {
3408      ConstantEvolutionLoopExitValue[PN] = PHIVal;
3409      ++NumBruteForceTripCountsComputed;
3410      return getConstant(Type::Int32Ty, IterationNum);
3411    }
3412
3413    // Compute the value of the PHI node for the next iteration.
3414    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3415    if (NextPHI == 0 || NextPHI == PHIVal)
3416      return CouldNotCompute;   // Couldn't evaluate or not making progress...
3417    PHIVal = NextPHI;
3418  }
3419
3420  // Too many iterations were needed to evaluate.
3421  return CouldNotCompute;
3422}
3423
3424/// getSCEVAtScope - Return a SCEV expression handle for the specified value
3425/// at the specified scope in the program.  The L value specifies the loop
3426/// nest in which to evaluate the expression: null means the top level, and a
3427/// non-null loop means the scope immediately inside that loop.
3428///
3429/// This method can be used to compute the exit value for a variable defined
3430/// in a loop by querying what the value will hold in the parent loop.
3431///
3432/// In the case that a relevant loop exit value cannot be computed, the
3433/// original value V is returned.
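/// As an illustrative example: if V is the recurrence {0,+,1} for an inner
/// loop whose backedge-taken count is known to be 9, evaluating V at the
/// scope of the enclosing loop yields the constant 9, the value the
/// recurrence takes on the final trip through the inner loop's header.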
3434SCEVHandle ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
3435  // FIXME: this should be turned into a virtual method on SCEV!
3436
3437  if (isa<SCEVConstant>(V)) return V;
3438
3439  // If this instruction is evolved from a constant-evolving PHI, compute the
3440  // exit value from the loop without using SCEVs.
3441  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
3442    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
3443      const Loop *LI = (*this->LI)[I->getParent()];
3444      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
3445        if (PHINode *PN = dyn_cast<PHINode>(I))
3446          if (PN->getParent() == LI->getHeader()) {
3447            // Okay, there is no closed form solution for the PHI node.  Check
3448            // to see if the loop that contains it has a known backedge-taken
3449            // count.  If so, we may be able to force computation of the exit
3450            // value.
3451            SCEVHandle BackedgeTakenCount = getBackedgeTakenCount(LI);
3452            if (const SCEVConstant *BTCC =
3453                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
3454              // Okay, we know how many times the containing loop executes.  If
3455              // this is a constant evolving PHI node, get the final value at
3456              // the specified iteration number.
3457              Constant *RV = getConstantEvolutionLoopExitValue(PN,
3458                                                   BTCC->getValue()->getValue(),
3459                                                               LI);
3460              if (RV) return getUnknown(RV);
3461            }
3462          }
3463
3464      // Okay, this is an expression that we cannot symbolically evaluate
3465      // into a SCEV.  Check to see if it's possible to symbolically evaluate
3466      // the arguments into constants, and if so, try to constant propagate the
3467      // result.  This is particularly useful for computing loop exit values.
3468      if (CanConstantFold(I)) {
3469        // Check to see if we've folded this instruction at this loop before.
3470        std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
3471        std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
3472          Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
3473        if (!Pair.second)
3474          return Pair.first->second ? &*getUnknown(Pair.first->second) : V;
3475
3476        std::vector<Constant*> Operands;
3477        Operands.reserve(I->getNumOperands());
3478        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3479          Value *Op = I->getOperand(i);
3480          if (Constant *C = dyn_cast<Constant>(Op)) {
3481            Operands.push_back(C);
3482          } else {
3483            // If this operand is non-constant and its type is neither
3484            // integer nor pointer, don't even try to analyze it with
3485            // SCEV techniques.
3486            if (!isSCEVable(Op->getType()))
3487              return V;
3488
3489            SCEVHandle OpV = getSCEVAtScope(getSCEV(Op), L);
3490            if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
3491              Constant *C = SC->getValue();
3492              if (C->getType() != Op->getType())
3493                C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3494                                                                  Op->getType(),
3495                                                                  false),
3496                                          C, Op->getType());
3497              Operands.push_back(C);
3498            } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
3499              if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
3500                if (C->getType() != Op->getType())
3501                  C =
3502                    ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3503                                                                  Op->getType(),
3504                                                                  false),
3505                                          C, Op->getType());
3506                Operands.push_back(C);
3507              } else
3508                return V;
3509            } else {
3510              return V;
3511            }
3512          }
3513        }
3514
3515        Constant *C;
3516        if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3517          C = ConstantFoldCompareInstOperands(CI->getPredicate(),
3518                                              &Operands[0], Operands.size());
3519        else
3520          C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3521                                       &Operands[0], Operands.size());
3522        Pair.first->second = C;
3523        return getUnknown(C);
3524      }
3525    }
3526
3527    // This is some other type of SCEVUnknown, just return it.
3528    return V;
3529  }
3530
3531  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
3532    // Avoid performing the look-up in the common case where the specified
3533    // expression has no loop-variant portions.
3534    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
3535      SCEVHandle OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3536      if (OpAtScope != Comm->getOperand(i)) {
3537        // Okay, at least one of these operands is loop variant but might be
3538        // foldable.  Build a new instance of the folded commutative expression.
3539        SmallVector<SCEVHandle, 8> NewOps(Comm->op_begin(), Comm->op_begin()+i);
3540        NewOps.push_back(OpAtScope);
3541
3542        for (++i; i != e; ++i) {
3543          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3544          NewOps.push_back(OpAtScope);
3545        }
3546        if (isa<SCEVAddExpr>(Comm))
3547          return getAddExpr(NewOps);
3548        if (isa<SCEVMulExpr>(Comm))
3549          return getMulExpr(NewOps);
3550        if (isa<SCEVSMaxExpr>(Comm))
3551          return getSMaxExpr(NewOps);
3552        if (isa<SCEVUMaxExpr>(Comm))
3553          return getUMaxExpr(NewOps);
3554        assert(0 && "Unknown commutative SCEV type!");
3555      }
3556    }
3557    // If we got here, all operands are loop invariant.
3558    return Comm;
3559  }
3560
3561  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
3562    SCEVHandle LHS = getSCEVAtScope(Div->getLHS(), L);
3563    SCEVHandle RHS = getSCEVAtScope(Div->getRHS(), L);
3564    if (LHS == Div->getLHS() && RHS == Div->getRHS())
3565      return Div;   // must be loop invariant
3566    return getUDivExpr(LHS, RHS);
3567  }
3568
3569  // If this is a loop recurrence for a loop that does not contain L, then we
3570  // are dealing with the final value computed by the loop.
3571  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
3572    if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
3573      // To evaluate this recurrence, we need to know how many times the AddRec
3574      // loop iterates.  Compute this now.
3575      SCEVHandle BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
3576      if (BackedgeTakenCount == CouldNotCompute) return AddRec;
3577
3578      // Then, evaluate the AddRec.
3579      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
3580    }
3581    return AddRec;
3582  }
3583
3584  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
3585    SCEVHandle Op = getSCEVAtScope(Cast->getOperand(), L);
3586    if (Op == Cast->getOperand())
3587      return Cast;  // must be loop invariant
3588    return getZeroExtendExpr(Op, Cast->getType());
3589  }
3590
3591  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
3592    SCEVHandle Op = getSCEVAtScope(Cast->getOperand(), L);
3593    if (Op == Cast->getOperand())
3594      return Cast;  // must be loop invariant
3595    return getSignExtendExpr(Op, Cast->getType());
3596  }
3597
3598  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
3599    SCEVHandle Op = getSCEVAtScope(Cast->getOperand(), L);
3600    if (Op == Cast->getOperand())
3601      return Cast;  // must be loop invariant
3602    return getTruncateExpr(Op, Cast->getType());
3603  }
3604
3605  assert(0 && "Unknown SCEV type!");
3606  return 0;
3607}
3608
3609/// getSCEVAtScope - This is a convenience function which does
3610/// getSCEVAtScope(getSCEV(V), L).
3611SCEVHandle ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
3612  return getSCEVAtScope(getSCEV(V), L);
3613}
3614
3615/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
3616/// following equation:
3617///
3618///     A * X = B (mod N)
3619///
3620/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
3621/// A and B isn't important.
3622///
3623/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
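/// As a worked example with small numbers: for BW == 4 (so N == 16), solving
/// 4 * X == 8 (mod 16) gives D == gcd(4, 16) == 4; 8 is divisible by 4; the
/// multiplicative inverse of 4/D == 1 modulo 16/D == 4 is 1; and the minimum
/// unsigned root is (1 * 8/4) mod 4 == 2.  Indeed 4 * 2 == 8 (mod 16), and 2
/// is the smallest such X.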
3624static SCEVHandle SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
3625                                               ScalarEvolution &SE) {
3626  uint32_t BW = A.getBitWidth();
3627  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
3628  assert(A != 0 && "A must be non-zero.");
3629
3630  // 1. D = gcd(A, N)
3631  //
3632  // Since N = 2^BW, the gcd of A and N can have only one prime factor: 2. The
3633  // number of trailing zeros in A is that factor's multiplicity.
3634  uint32_t Mult2 = A.countTrailingZeros();
3635  // D = 2^Mult2
3636
3637  // 2. Check if B is divisible by D.
3638  //
3639  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
3640  // is not less than the multiplicity of this prime factor in D.
3641  if (B.countTrailingZeros() < Mult2)
3642    return SE.getCouldNotCompute();
3643
3644  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
3645  // modulo (N / D).
3646  //
3647  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
3648  // bit width during computations.
3649  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
3650  APInt Mod(BW + 1, 0);
3651  Mod.set(BW - Mult2);  // Mod = N / D
3652  APInt I = AD.multiplicativeInverse(Mod);
3653
3654  // 4. Compute the minimum unsigned root of the equation:
3655  // I * (B / D) mod (N / D)
3656  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
3657
3658  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
3659  // bits.
3660  return SE.getConstant(Result.trunc(BW));
3661}
3662
3663/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
3664/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
3665/// might be the same) or two SCEVCouldNotCompute objects.
3666///
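/// As a worked example: the chrec {-4,+,1,+,2} evaluates at iteration i to
/// i*i - 4 (below, A = N/2 = 1, B = M - N/2 = 0, C = L = -4), so the two
/// roots returned are 2 and -2.
///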
3667static std::pair<SCEVHandle,SCEVHandle>
3668SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
3669  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
3670  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
3671  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
3672  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
3673
3674  // We currently can only solve this if the coefficients are constants.
3675  if (!LC || !MC || !NC) {
3676    const SCEV *CNC = SE.getCouldNotCompute();
3677    return std::make_pair(CNC, CNC);
3678  }
3679
3680  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
3681  const APInt &L = LC->getValue()->getValue();
3682  const APInt &M = MC->getValue()->getValue();
3683  const APInt &N = NC->getValue()->getValue();
3684  APInt Two(BitWidth, 2);
3685  APInt Four(BitWidth, 4);
3686
3687  {
3688    using namespace APIntOps;
3689    const APInt& C = L;
3690    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
3691    // The B coefficient is M-N/2
3692    APInt B(M);
3693    B -= sdiv(N,Two);
3694
3695    // The A coefficient is N/2
3696    APInt A(N.sdiv(Two));
3697
3698    // Compute the B^2-4ac term.
3699    APInt SqrtTerm(B);
3700    SqrtTerm *= B;
3701    SqrtTerm -= Four * (A * C);
3702
3703    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
3704    // integer value or else APInt::sqrt() will assert.
3705    APInt SqrtVal(SqrtTerm.sqrt());
3706
3707    // Compute the two solutions for the quadratic formula.
3708    // The divisions must be performed as signed divisions.
3709    APInt NegB(-B);
3710    APInt TwoA( A << 1 );
3711    if (TwoA.isMinValue()) {
3712      const SCEV *CNC = SE.getCouldNotCompute();
3713      return std::make_pair(CNC, CNC);
3714    }
3715
3716    ConstantInt *Solution1 = ConstantInt::get((NegB + SqrtVal).sdiv(TwoA));
3717    ConstantInt *Solution2 = ConstantInt::get((NegB - SqrtVal).sdiv(TwoA));
3718
3719    return std::make_pair(SE.getConstant(Solution1),
3720                          SE.getConstant(Solution2));
3721  } // end scope using APIntOps
3722}
3723
3724/// HowFarToZero - Return the number of times a backedge comparing the specified
3725/// value to zero will execute.  If not computable, return CouldNotCompute.
3726SCEVHandle ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
3727  // If the value is a constant, handle it directly.
3728  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
3729    // If the value is already zero, the branch will execute zero times.
3730    if (C->getValue()->isZero()) return C;
3731    return CouldNotCompute;  // Otherwise it will loop infinitely.
3732  }
3733
3734  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
3735  if (!AddRec || AddRec->getLoop() != L)
3736    return CouldNotCompute;
3737
3738  if (AddRec->isAffine()) {
3739    // If this is an affine expression, the execution count of this branch is
3740    // the minimum unsigned root of the following equation:
3741    //
3742    //     Start + Step*N = 0 (mod 2^BW)
3743    //
3744    // equivalent to:
3745    //
3746    //             Step*N = -Start (mod 2^BW)
3747    //
3748    // where BW is the common bit width of Start and Step.
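    // As a worked example (values are illustrative): for the chrec {10,+,-2}
    // in an i32 loop, the equation is -2 * N == -10 (mod 2^32), whose minimum
    // unsigned solution is N == 5; the value reaches zero after 5 backedges.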
3749
3750    // Get the initial value for the loop.
3751    SCEVHandle Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
3752    SCEVHandle Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
3753
3754    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
3755      // For now we handle only constant steps.
3756
3757      // First, handle unitary steps.
3758      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
3759        return getNegativeSCEV(Start);       //   N = -Start (as unsigned)
3760      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
3761        return Start;                           //    N = Start (as unsigned)
3762
3763      // Then, try to solve the above equation provided that Start is constant.
3764      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
3765        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
3766                                            -StartC->getValue()->getValue(),
3767                                            *this);
3768    }
3769  } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
3770    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
3771    // the quadratic equation to solve it.
3772    std::pair<SCEVHandle,SCEVHandle> Roots = SolveQuadraticEquation(AddRec,
3773                                                                    *this);
3774    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
3775    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
3776    if (R1) {
3777#if 0
3778      errs() << "HFTZ: " << *V << " - sol#1: " << *R1
3779             << "  sol#2: " << *R2 << "\n";
3780#endif
3781      // Pick the smallest positive root value.
3782      if (ConstantInt *CB =
3783          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
3784                                   R1->getValue(), R2->getValue()))) {
3785        if (CB->getZExtValue() == false)
3786          std::swap(R1, R2);   // R1 is the minimum root now.
3787
3788        // We can only use this value if the chrec ends up with an exact zero
3789        // value at this index.  When solving for "X*X != 5", for example, we
3790        // should not accept a root of 2.
3791        SCEVHandle Val = AddRec->evaluateAtIteration(R1, *this);
3792        if (Val->isZero())
3793          return R1;  // We found a quadratic root!
3794      }
3795    }
3796  }
3797
3798  return CouldNotCompute;
3799}
3800
3801/// HowFarToNonZero - Return the number of times a backedge checking the
3802/// specified value for nonzero will execute.  If not computable, return
3803/// CouldNotCompute.
3804SCEVHandle ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
3805  // Loops that look like: while (X == 0) are very strange indeed.  We don't
3806  // handle them yet except for the trivial case.  This could be expanded in the
3807  // future as needed.
3808
3809  // If the value is a constant, check to see if it is known to be non-zero
3810  // already.  If so, the backedge will execute zero times.
3811  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
3812    if (!C->getValue()->isNullValue())
3813      return getIntegerSCEV(0, C->getType());
3814    return CouldNotCompute;  // Otherwise it will loop infinitely.
3815  }
3816
3817  // We could implement others, but I really doubt anyone writes loops like
3818  // this, and if they did, they would already be constant folded.
3819  return CouldNotCompute;
3820}
3821
3822/// getLoopPredecessor - If the given loop's header has exactly one unique
3823/// predecessor outside the loop, return it. Otherwise return null.
3824///
3825BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
3826  BasicBlock *Header = L->getHeader();
3827  BasicBlock *Pred = 0;
3828  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
3829       PI != E; ++PI)
3830    if (!L->contains(*PI)) {
3831      if (Pred && Pred != *PI) return 0; // Multiple predecessors.
3832      Pred = *PI;
3833    }
3834  return Pred;
3835}
3836
3837/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
3838/// (which may not be an immediate predecessor) which has exactly one
3839/// successor from which BB is reachable, or null if no such block is
3840/// found.
3841///
3842BasicBlock *
3843ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
3844  // If the block has a unique predecessor, then there is no path from the
3845  // predecessor to the block that does not go through the direct edge
3846  // from the predecessor to the block.
3847  if (BasicBlock *Pred = BB->getSinglePredecessor())
3848    return Pred;
3849
3850  // A loop's header is defined to be a block that dominates the loop.
3851  // If the header has a unique predecessor outside the loop, it must be
3852  // a block that has exactly one successor that can reach the loop.
3853  if (Loop *L = LI->getLoopFor(BB))
3854    return getLoopPredecessor(L);
3855
3856  return 0;
3857}
3858
3859/// HasSameValue - SCEV structural equivalence is usually sufficient for
3860/// testing whether two expressions are equal, however for the purposes of
3861/// looking for a condition guarding a loop, it can be useful to be a little
3862/// more general, since a front-end may have replicated the controlling
3863/// expression.
3864///
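/// For example (illustration only), if a front-end replicates the instruction
/// that computes a loop bound, the two copies become distinct SCEVUnknowns,
/// but isIdenticalTo still lets them compare equal here.
///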
3865static bool HasSameValue(const SCEVHandle &A, const SCEVHandle &B) {
3866  // Quick check to see if they are the same SCEV.
3867  if (A == B) return true;
3868
3869  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
3870  // two different instructions with the same value. Check for this case.
3871  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
3872    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
3873      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
3874        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
3875          if (AI->isIdenticalTo(BI))
3876            return true;
3877
3878  // Otherwise assume they may have a different value.
3879  return false;
3880}
3881
3882/// isLoopGuardedByCond - Test whether entry to the loop is protected by
3883/// a conditional between LHS and RHS.  This is used to help avoid max
3884/// expressions in loop trip counts.
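/// For example (an illustration of its use in HowManyLessThans below), knowing
/// that a loop over 'i = s; i < n; ++i' is only entered when s < n allows the
/// trip count to be expressed as n - s rather than smax(n, s) - s.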
3885bool ScalarEvolution::isLoopGuardedByCond(const Loop *L,
3886                                          ICmpInst::Predicate Pred,
3887                                          const SCEV *LHS, const SCEV *RHS) {
3888  // Interpret a null as meaning no loop, where there is obviously no guard
3889  // (interprocedural conditions notwithstanding).
3890  if (!L) return false;
3891
3892  BasicBlock *Predecessor = getLoopPredecessor(L);
3893  BasicBlock *PredecessorDest = L->getHeader();
3894
3895  // Starting at the loop predecessor, climb up the predecessor chain, as long
3896  // as we can find predecessors that have a unique successor leading to the
3897  // original header.
3898  for (; Predecessor;
3899       PredecessorDest = Predecessor,
3900       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
3901
3902    BranchInst *LoopEntryPredicate =
3903      dyn_cast<BranchInst>(Predecessor->getTerminator());
3904    if (!LoopEntryPredicate ||
3905        LoopEntryPredicate->isUnconditional())
3906      continue;
3907
3908    ICmpInst *ICI = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition());
3909    if (!ICI) continue;
3910
3911    // Now that we found a conditional branch that dominates the loop, check to
3912    // see if it is the comparison we are looking for.
3913    Value *PreCondLHS = ICI->getOperand(0);
3914    Value *PreCondRHS = ICI->getOperand(1);
3915    ICmpInst::Predicate Cond;
3916    if (LoopEntryPredicate->getSuccessor(0) == PredecessorDest)
3917      Cond = ICI->getPredicate();
3918    else
3919      Cond = ICI->getInversePredicate();
3920
3921    if (Cond == Pred)
3922      ; // An exact match.
3923    else if (!ICmpInst::isTrueWhenEqual(Cond) && Pred == ICmpInst::ICMP_NE)
3924      ; // The actual condition is strictly stronger than what we need.
3925    else
3926      // Check a few special cases.
3927      switch (Cond) {
3928      case ICmpInst::ICMP_UGT:
3929        if (Pred == ICmpInst::ICMP_ULT) {
3930          std::swap(PreCondLHS, PreCondRHS);
3931          Cond = ICmpInst::ICMP_ULT;
3932          break;
3933        }
3934        continue;
3935      case ICmpInst::ICMP_SGT:
3936        if (Pred == ICmpInst::ICMP_SLT) {
3937          std::swap(PreCondLHS, PreCondRHS);
3938          Cond = ICmpInst::ICMP_SLT;
3939          break;
3940        }
3941        continue;
3942      case ICmpInst::ICMP_NE:
3943        // Expressions like (x >u 0) are often canonicalized to (x != 0),
3944        // so check for this case by checking if the NE is comparing against
3945        // a minimum or maximum constant.
3946        if (!ICmpInst::isTrueWhenEqual(Pred))
3947          if (ConstantInt *CI = dyn_cast<ConstantInt>(PreCondRHS)) {
3948            const APInt &A = CI->getValue();
3949            switch (Pred) {
3950            case ICmpInst::ICMP_SLT:
3951              if (A.isMaxSignedValue()) break;
3952              continue;
3953            case ICmpInst::ICMP_SGT:
3954              if (A.isMinSignedValue()) break;
3955              continue;
3956            case ICmpInst::ICMP_ULT:
3957              if (A.isMaxValue()) break;
3958              continue;
3959            case ICmpInst::ICMP_UGT:
3960              if (A.isMinValue()) break;
3961              continue;
3962            default:
3963              continue;
3964            }
3965            Cond = ICmpInst::ICMP_NE;
3966            // NE is symmetric but the original comparison may not be. Swap
3967            // the operands if necessary so that they match below.
3968            if (isa<SCEVConstant>(LHS))
3969              std::swap(PreCondLHS, PreCondRHS);
3970            break;
3971          }
3972        continue;
3973      default:
3974        // We weren't able to reconcile the condition.
3975        continue;
3976      }
3977
3978    if (!PreCondLHS->getType()->isInteger()) continue;
3979
3980    SCEVHandle PreCondLHSSCEV = getSCEV(PreCondLHS);
3981    SCEVHandle PreCondRHSSCEV = getSCEV(PreCondRHS);
3982    if ((HasSameValue(LHS, PreCondLHSSCEV) &&
3983         HasSameValue(RHS, PreCondRHSSCEV)) ||
3984        (HasSameValue(LHS, getNotSCEV(PreCondRHSSCEV)) &&
3985         HasSameValue(RHS, getNotSCEV(PreCondLHSSCEV))))
3986      return true;
3987  }
3988
3989  return false;
3990}
3991
3992/// getBECount - Subtract the end and start values and divide by the step,
3993/// rounding up, to get the number of times the backedge is executed. Return
3994/// CouldNotCompute if an intermediate computation overflows.
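/// For example (with invented values), Start = 3, End = 10, Step = 4 yields
/// (10 - 3 + (4 - 1)) /u 4 = 2.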
3995SCEVHandle ScalarEvolution::getBECount(const SCEVHandle &Start,
3996                                       const SCEVHandle &End,
3997                                       const SCEVHandle &Step) {
3998  const Type *Ty = Start->getType();
3999  SCEVHandle NegOne = getIntegerSCEV(-1, Ty);
4000  SCEVHandle Diff = getMinusSCEV(End, Start);
4001  SCEVHandle RoundUp = getAddExpr(Step, NegOne);
4002
4003  // Add an adjustment to the difference between End and Start so that
4004  // the division will effectively round up.
4005  SCEVHandle Add = getAddExpr(Diff, RoundUp);
4006
4007  // Check Add for unsigned overflow.
4008  // TODO: More sophisticated things could be done here.
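  // The idea: if the narrow addition did not wrap, then zero-extending its
  // result must equal the same addition carried out in a type one bit wider.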
4009  const Type *WideTy = IntegerType::get(getTypeSizeInBits(Ty) + 1);
4010  SCEVHandle OperandExtendedAdd =
4011    getAddExpr(getZeroExtendExpr(Diff, WideTy),
4012               getZeroExtendExpr(RoundUp, WideTy));
4013  if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
4014    return CouldNotCompute;
4015
4016  return getUDivExpr(Add, Step);
4017}
4018
4019/// HowManyLessThans - Return the number of times a backedge containing the
4020/// specified less-than comparison will execute.  If not computable, return
4021/// CouldNotCompute.
4022ScalarEvolution::BackedgeTakenInfo ScalarEvolution::
4023HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
4024                 const Loop *L, bool isSigned) {
4025  // Only handle:  "ADDREC < LoopInvariant".
4026  if (!RHS->isLoopInvariant(L)) return CouldNotCompute;
4027
4028  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
4029  if (!AddRec || AddRec->getLoop() != L)
4030    return CouldNotCompute;
4031
4032  if (AddRec->isAffine()) {
4033    // FORNOW: We only support constant, positive strides; see the checks below.
4034    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
4035    SCEVHandle Step = AddRec->getStepRecurrence(*this);
4036
4037    // TODO: handle non-constant strides.
4038    const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
4039    if (!CStep || CStep->isZero())
4040      return CouldNotCompute;
4041    if (CStep->isOne()) {
4042      // With unit stride, the iteration never steps past the limit value.
4043    } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
4044      if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
4045        // Test whether a positive iteration can step past the limit
4046        // value and past the maximum value for its type in a single step.
4047        if (isSigned) {
4048          APInt Max = APInt::getSignedMaxValue(BitWidth);
4049          if ((Max - CStep->getValue()->getValue())
4050                .slt(CLimit->getValue()->getValue()))
4051            return CouldNotCompute;
4052        } else {
4053          APInt Max = APInt::getMaxValue(BitWidth);
4054          if ((Max - CStep->getValue()->getValue())
4055                .ult(CLimit->getValue()->getValue()))
4056            return CouldNotCompute;
4057        }
4058      } else
4059        // TODO: handle non-constant limit values below.
4060        return CouldNotCompute;
4061    } else
4062      // TODO: handle negative strides below.
4063      return CouldNotCompute;
4064
4065    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
4066    // m.  So, we count the number of iterations in which {n,+,s} < m is true.
4067    // Note that we cannot simply return max(m-n,0)/s because it's not safe to
4068    // treat m-n as either signed or unsigned due to the possibility of overflow.
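    // (Invented 8-bit illustration: with unsigned n = 200 and m = 100 the loop
    // body never runs, yet m-n wraps to 156; with signed n = -120 and m = 120,
    // m-n overflows to -16. Hence the guarded max/divide computation below.)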
4069
4070    // First, we get the value of the LHS in the first iteration: n
4071    SCEVHandle Start = AddRec->getOperand(0);
4072
4073    // Determine the minimum constant start value.
4074    SCEVHandle MinStart = isa<SCEVConstant>(Start) ? Start :
4075      getConstant(isSigned ? APInt::getSignedMinValue(BitWidth) :
4076                             APInt::getMinValue(BitWidth));
4077
4078    // If we know that the condition is true in order to enter the loop,
4079    // then we know that it will run exactly (m-n)/s times. Otherwise, we
4080    // only know that it will execute (max(m,n)-n)/s times. In both cases,
4081    // the division must round up.
4082    SCEVHandle End = RHS;
4083    if (!isLoopGuardedByCond(L,
4084                             isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
4085                             getMinusSCEV(Start, Step), RHS))
4086      End = isSigned ? getSMaxExpr(RHS, Start)
4087                     : getUMaxExpr(RHS, Start);
4088
4089    // Determine the maximum constant end value.
4090    SCEVHandle MaxEnd =
4091      isa<SCEVConstant>(End) ? End :
4092      getConstant(isSigned ? APInt::getSignedMaxValue(BitWidth)
4093                               .ashr(GetMinSignBits(End) - 1) :
4094                             APInt::getMaxValue(BitWidth)
4095                               .lshr(GetMinLeadingZeros(End)));
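    // (The shifts above turn the known sign-bit / leading-zero counts of End
    // into a conservative constant upper bound on its value.)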
4096
4097    // Finally, we subtract these two values and divide, rounding up, to get
4098    // the number of times the backedge is executed.
4099    SCEVHandle BECount = getBECount(Start, End, Step);
4100
4101    // The maximum backedge count is similar, except using the minimum start
4102    // value and the maximum end value.
4103    SCEVHandle MaxBECount = getBECount(MinStart, MaxEnd, Step);
4104
4105    return BackedgeTakenInfo(BECount, MaxBECount);
4106  }
4107
4108  return CouldNotCompute;
4109}
4110
4111/// getNumIterationsInRange - Return the number of iterations of this loop that
4112/// produce values in the specified constant range.  Another way of looking at
4113/// this is that it returns the first iteration number where the value is not in
4114/// the range, thus computing the exit count. If the iteration count can't
4115/// be computed, an instance of SCEVCouldNotCompute is returned.
4116SCEVHandle SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
4117                                                   ScalarEvolution &SE) const {
4118  if (Range.isFullSet())  // Infinite loop.
4119    return SE.getCouldNotCompute();
4120
4121  // If the start is a non-zero constant, shift the range to simplify things.
4122  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
4123    if (!SC->getValue()->isZero()) {
4124      SmallVector<SCEVHandle, 4> Operands(op_begin(), op_end());
4125      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
4126      SCEVHandle Shifted = SE.getAddRecExpr(Operands, getLoop());
4127      if (const SCEVAddRecExpr *ShiftedAddRec =
4128            dyn_cast<SCEVAddRecExpr>(Shifted))
4129        return ShiftedAddRec->getNumIterationsInRange(
4130                           Range.subtract(SC->getValue()->getValue()), SE);
4131      // This is strange and shouldn't happen.
4132      return SE.getCouldNotCompute();
4133    }
4134
4135  // The only time we can solve this is when we have all constant indices.
4136  // Otherwise, we cannot determine the overflow conditions.
4137  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
4138    if (!isa<SCEVConstant>(getOperand(i)))
4139      return SE.getCouldNotCompute();
4140
4142  // Okay at this point we know that all elements of the chrec are constants and
4143  // that the start element is zero.
4144
4145  // First check to see if the range contains zero.  If not, the first
4146  // iteration exits.
4147  unsigned BitWidth = SE.getTypeSizeInBits(getType());
4148  if (!Range.contains(APInt(BitWidth, 0)))
4149    return SE.getIntegerSCEV(0, getType());
4150
4151  if (isAffine()) {
4152    // If this is an affine expression then we have this situation:
4153    //   Solve {0,+,A} in Range  ===  Ax in Range
4154
4155    // We know that zero is in the range.  If A is positive then we know that
4156    // the upper value of the range must be the first possible exit value.
4157    // If A is negative then the lower of the range is the last possible loop
4158    // value.  Also note that we already checked for a full range.
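    // (Illustration with invented values: for Range = [0, 10) and A = 3, End
    // below is 9 and ExitVal = (9 + 3) /u 3 = 4; iteration 4 produces 12, the
    // first value outside the range, while iteration 3 produces 9, still inside.)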
4159    APInt One(BitWidth,1);
4160    APInt A     = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
4161    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
4162
4163    // The exit value should be (End+A)/A.
4164    APInt ExitVal = (End + A).udiv(A);
4165    ConstantInt *ExitValue = ConstantInt::get(ExitVal);
4166
4167    // Evaluate at the exit value.  If we really did fall out of the valid
4168    // range, then we computed our trip count; otherwise, wraparound or some
4169    // other strange thing must have happened.
4170    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
4171    if (Range.contains(Val->getValue()))
4172      return SE.getCouldNotCompute();  // Something strange happened
4173
4174    // Ensure that the previous value is in the range.  This is a sanity check.
4175    assert(Range.contains(
4176           EvaluateConstantChrecAtConstant(this,
4177           ConstantInt::get(ExitVal - One), SE)->getValue()) &&
4178           "Linear scev computation is off in a bad way!");
4179    return SE.getConstant(ExitValue);
4180  } else if (isQuadratic()) {
4181    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
4182    // quadratic equation to solve it.  To do this, we must frame our problem in
4183    // terms of figuring out when zero is crossed, instead of when
4184    // Range.getUpper() is crossed.
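    // Since the start was already normalized to zero above, NewAddRec below is
    // simply this chrec minus Range.getUpper(), so it crosses zero exactly when
    // this chrec crosses the upper end of the range.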
4185    SmallVector<SCEVHandle, 4> NewOps(op_begin(), op_end());
4186    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
4187    SCEVHandle NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
4188
4189    // Next, solve the constructed addrec
4190    std::pair<SCEVHandle,SCEVHandle> Roots =
4191      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
4192    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4193    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4194    if (R1) {
4195      // Pick the smallest positive root value.
4196      if (ConstantInt *CB =
4197          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
4198                                   R1->getValue(), R2->getValue()))) {
4199        if (CB->getZExtValue() == false)
4200          std::swap(R1, R2);   // R1 is the minimum root now.
4201
4202        // Make sure the root is not off by one.  The returned iteration should
4203        // not be in the range, but the previous one should be.  When solving
4204        // for "X*X < 5", for example, we should not return a root of 2.
4205        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
4206                                                             R1->getValue(),
4207                                                             SE);
4208        if (Range.contains(R1Val->getValue())) {
4209          // The next iteration must be out of the range...
4210          ConstantInt *NextVal = ConstantInt::get(R1->getValue()->getValue()+1);
4211
4212          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4213          if (!Range.contains(R1Val->getValue()))
4214            return SE.getConstant(NextVal);
4215          return SE.getCouldNotCompute();  // Something strange happened
4216        }
4217
4218        // If R1 was not in the range, then it is a good return value.  Make
4219        // sure that R1-1 WAS in the range though, just in case.
4220        ConstantInt *NextVal = ConstantInt::get(R1->getValue()->getValue()-1);
4221        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4222        if (Range.contains(R1Val->getValue()))
4223          return R1;
4224        return SE.getCouldNotCompute();  // Something strange happened
4225      }
4226    }
4227  }
4228
4229  return SE.getCouldNotCompute();
4230}
4231
4232
4233
4234//===----------------------------------------------------------------------===//
4235//                   SCEVCallbackVH Class Implementation
4236//===----------------------------------------------------------------------===//
4237
4238void ScalarEvolution::SCEVCallbackVH::deleted() {
4239  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4240  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
4241    SE->ConstantEvolutionLoopExitValue.erase(PN);
4242  if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
4243    SE->ValuesAtScopes.erase(I);
4244  SE->Scalars.erase(getValPtr());
4245  // this now dangles!
4246}
4247
4248void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
4249  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4250
4251  // Forget all the expressions associated with users of the old value,
4252  // so that future queries will recompute the expressions using the new
4253  // value.
4254  SmallVector<User *, 16> Worklist;
4255  Value *Old = getValPtr();
4256  bool DeleteOld = false;
4257  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
4258       UI != UE; ++UI)
4259    Worklist.push_back(*UI);
4260  while (!Worklist.empty()) {
4261    User *U = Worklist.pop_back_val();
4262    // Deleting the Old value will cause this to dangle. Postpone
4263    // that until everything else is done.
4264    if (U == Old) {
4265      DeleteOld = true;
4266      continue;
4267    }
4268    if (PHINode *PN = dyn_cast<PHINode>(U))
4269      SE->ConstantEvolutionLoopExitValue.erase(PN);
4270    if (Instruction *I = dyn_cast<Instruction>(U))
4271      SE->ValuesAtScopes.erase(I);
4272    if (SE->Scalars.erase(U))
4273      for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
4274           UI != UE; ++UI)
4275        Worklist.push_back(*UI);
4276  }
4277  if (DeleteOld) {
4278    if (PHINode *PN = dyn_cast<PHINode>(Old))
4279      SE->ConstantEvolutionLoopExitValue.erase(PN);
4280    if (Instruction *I = dyn_cast<Instruction>(Old))
4281      SE->ValuesAtScopes.erase(I);
4282    SE->Scalars.erase(Old);
4283    // this now dangles!
4284  }
4285  // this may dangle!
4286}
4287
4288ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
4289  : CallbackVH(V), SE(se) {}
4290
4291//===----------------------------------------------------------------------===//
4292//                   ScalarEvolution Class Implementation
4293//===----------------------------------------------------------------------===//
4294
4295ScalarEvolution::ScalarEvolution()
4296  : FunctionPass(&ID), CouldNotCompute(new SCEVCouldNotCompute(0)) {
4297}
4298
4299bool ScalarEvolution::runOnFunction(Function &F) {
4300  this->F = &F;
4301  LI = &getAnalysis<LoopInfo>();
4302  TD = getAnalysisIfAvailable<TargetData>();
4303  return false;
4304}
4305
4306void ScalarEvolution::releaseMemory() {
4307  Scalars.clear();
4308  BackedgeTakenCounts.clear();
4309  ConstantEvolutionLoopExitValue.clear();
4310  ValuesAtScopes.clear();
4311}
4312
4313void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
4314  AU.setPreservesAll();
4315  AU.addRequiredTransitive<LoopInfo>();
4316}
4317
4318bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
4319  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
4320}
4321
4322static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
4323                          const Loop *L) {
4324  // Print all inner loops first
4325  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4326    PrintLoopInfo(OS, SE, *I);
4327
4328  OS << "Loop " << L->getHeader()->getName() << ": ";
4329
4330  SmallVector<BasicBlock*, 8> ExitBlocks;
4331  L->getExitBlocks(ExitBlocks);
4332  if (ExitBlocks.size() != 1)
4333    OS << "<multiple exits> ";
4334
4335  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
4336    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
4337  } else {
4338    OS << "Unpredictable backedge-taken count. ";
4339  }
4340
4341  OS << "\n";
4342}
4343
4344void ScalarEvolution::print(raw_ostream &OS, const Module* ) const {
4345  // ScalarEvolution's implementation of the print method is to print
4346  // out SCEV values of all instructions that are interesting. Doing
4347  // this potentially causes it to create new SCEV objects though,
4348  // which technically conflicts with the const qualifier. This isn't
4349  // observable from outside the class though (the hasSCEV function
4350  // notwithstanding), so casting away the const isn't dangerous.
4351  ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);
4352
4353  OS << "Classifying expressions for: " << F->getName() << "\n";
4354  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
4355    if (isSCEVable(I->getType())) {
4356      OS << *I;
4357      OS << "  -->  ";
4358      SCEVHandle SV = SE.getSCEV(&*I);
4359      SV->print(OS);
4360
4361      const Loop *L = LI->getLoopFor((*I).getParent());
4362
4363      SCEVHandle AtUse = SE.getSCEVAtScope(SV, L);
4364      if (AtUse != SV) {
4365        OS << "  -->  ";
4366        AtUse->print(OS);
4367      }
4368
4369      if (L) {
4370        OS << "\t\t" "Exits: ";
4371        SCEVHandle ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
4372        if (!ExitValue->isLoopInvariant(L)) {
4373          OS << "<<Unknown>>";
4374        } else {
4375          OS << *ExitValue;
4376        }
4377      }
4378
4379      OS << "\n";
4380    }
4381
4382  OS << "Determining loop execution counts for: " << F->getName() << "\n";
4383  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
4384    PrintLoopInfo(OS, &SE, *I);
4385}
4386
4387void ScalarEvolution::print(std::ostream &o, const Module *M) const {
4388  raw_os_ostream OS(o);
4389  print(OS, M);
4390}
4391