ScalarEvolution.cpp revision 1b342583f6fc42f548912632f6aa24fc6e11986a
//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These objects are uniqued within ScalarEvolution: we only
// create one SCEV of a particular shape, so pointer comparisons for equality
// are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
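//
// For example, the canonical induction variable of a loop
//
//   for (i = 0; i != n; ++i) ...
//
// is represented as the recurrence {0,+,1}<loop>: it starts at 0 and is
// incremented by 1 on each trip around the loop's backedge.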
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(scCouldNotCompute) {}

void SCEVCouldNotCompute::Profile(FoldingSetNodeID &ID) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  assert(0 && "Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const SCEV *
SCEVCouldNotCompute::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
  return this;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
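  // Look up this profile in the uniquing table. If an equivalent node already
  // exists, return it; otherwise allocate one and insert it at the position
  // FoldingSet computed for us. The same pattern recurs in the other get*
  // methods below.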
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
  new (S) SCEVConstant(V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

void SCEVConstant::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(unsigned SCEVTy,
                           const SCEV *op, const Type *ty)
  : SCEV(SCEVTy), Op(op), Ty(ty) {}

void SCEVCastExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(getSCEVType());
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const SCEV *op, const Type *ty)
  : SCEVCastExpr(scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const SCEV *op, const Type *ty)
  : SCEVCastExpr(scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const SCEV *op, const Type *ty)
  : SCEVCastExpr(scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

const SCEV *
SCEVCommutativeExpr::replaceSymbolicValuesWithConcrete(
                                                    const SCEV *Sym,
                                                    const SCEV *Conc,
                                                    ScalarEvolution &SE) const {
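  // Scan the operands, copying lazily: only when some operand actually
  // changes do we build a new operand list, copying the unchanged prefix and
  // rewriting the rest, and re-fold the result through ScalarEvolution.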
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      if (isa<SCEVAddExpr>(this))
        return SE.getAddExpr(NewOps);
      else if (isa<SCEVMulExpr>(this))
        return SE.getMulExpr(NewOps);
      else if (isa<SCEVSMaxExpr>(this))
        return SE.getSMaxExpr(NewOps);
      else if (isa<SCEVUMaxExpr>(this))
        return SE.getUMaxExpr(NewOps);
      else
        assert(0 && "Unknown commutative expr!");
    }
  }
  return this;
}

void SCEVNAryExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(getSCEVType());
  ID.AddInteger(Operands.size());
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

void SCEVUDivExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

void SCEVAddRecExpr::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scAddRecExpr);
  ID.AddInteger(Operands.size());
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
  ID.AddPointer(L);
}

const SCEV *
SCEVAddRecExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                  const SCEV *Conc,
                                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      return SE.getAddRecExpr(NewOps, L);
    }
  }
  return this;
}


bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L->getHeader()))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() << ">";
}

void SCEVUnknown::Profile(FoldingSetNodeID &ID) const {
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
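  /// For example, scConstant is the smallest SCEVType, so constants always
  /// sort to the front of an operand list; getAddExpr relies on this when it
  /// folds leading constants together, and an add of %x and 2 is therefore
  /// always built and printed as (2 + %x), never (%x + 2).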
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      assert(0 && "Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
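///
/// For example, given the operands (%x, 2, %x), sorting moves the constant to
/// the front and leaves the duplicates adjacent -- (2, %x, %x) -- which is
/// what lets getAddExpr fold the pair into (2 + (2 * %x)).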
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result is computed at the
/// bitwidth of ResultTy.  Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                      ScalarEvolution &SE,
                                      const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
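  //
  // As a concrete worked example, take K = 3 and W = 32.  Then
  // K! = 6 = 2^1 * 3, so T = 1 and K!/2^T = 3.  The product
  // It*(It-1)*(It-2) is computed at width W + T = 33 bits, shifted right
  // by T = 1, truncated back to 32 bits, and multiplied by 0xAAAAAAAB,
  // the multiplicative inverse of 3 modulo 2^32 (3 * 0xAAAAAAAB = 2^33 + 1,
  // which is 1 modulo 2^32).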

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
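///
/// For example, the recurrence {0,+,1,+,1} evaluates at iteration It to
///
///   0*BC(It, 0) + 1*BC(It, 1) + 1*BC(It, 2) = It + It*(It-1)/2
///                                           = It*(It+1)/2,
///
/// the running sum 1 + 2 + ... + It.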
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                               ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                            const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
  new (S) SCEVTruncateExpr(Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
  new (S) SCEVZeroExtendExpr(Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                      getSignedRange(Step).getSignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
  new (S) SCEVSignExtendExpr(Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV *> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV *Two = getIntegerSCEV(2, Ty);
      const SCEV *Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }
1295
1296  // Skip over the add expression until we get to a multiply.
1297  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1298    ++Idx;
1299
1300  // Check to see if there are any folding opportunities present with
1301  // operands multiplied by constant values.
1302  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1303    uint64_t BitWidth = getTypeSizeInBits(Ty);
1304    DenseMap<const SCEV *, APInt> M;
1305    SmallVector<const SCEV *, 8> NewOps;
1306    APInt AccumulatedConstant(BitWidth, 0);
1307    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1308                                     Ops, APInt(BitWidth, 1), *this)) {
1309      // Some interesting folding opportunity is present, so it's worthwhile to
1310      // re-generate the operands list. Group the operands by constant scale,
1311      // to avoid multiplying by the same constant scale multiple times.
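          // For example, (2*X + 4*Y + 2*Z) is regrouped as 2*(X + Z) + 4*Y,
          // emitting one multiply per distinct scale.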
1312      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1313      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
1314           E = NewOps.end(); I != E; ++I)
1315        MulOpLists[M.find(*I)->second].push_back(*I);
1316      // Re-generate the operands list.
1317      Ops.clear();
1318      if (AccumulatedConstant != 0)
1319        Ops.push_back(getConstant(AccumulatedConstant));
1320      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1321           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1322        if (I->first != 0)
1323          Ops.push_back(getMulExpr(getConstant(I->first),
1324                                   getAddExpr(I->second)));
1325      if (Ops.empty())
1326        return getIntegerSCEV(0, Ty);
1327      if (Ops.size() == 1)
1328        return Ops[0];
1329      return getAddExpr(Ops);
1330    }
1331  }
1332
1333  // If we are adding something to a multiply expression, make sure the
1334  // something is not already an operand of the multiply.  If so, merge it into
1335  // the multiply.
1336  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1337    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1338    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1339      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1340      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1341        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
1342          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
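              // Note that when the multiply has exactly two operands,
              // Mul->getOperand(MulOp == 0) is simply the operand other than
              // MulOpSCEV; larger multiplies are rebuilt explicitly below.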
1343          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1344          if (Mul->getNumOperands() != 2) {
1345            // If the multiply has more than two operands, we must get the
1346            // Y*Z term.
1347            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
1348            MulOps.erase(MulOps.begin()+MulOp);
1349            InnerMul = getMulExpr(MulOps);
1350          }
1351          const SCEV *One = getIntegerSCEV(1, Ty);
1352          const SCEV *AddOne = getAddExpr(InnerMul, One);
1353          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
1354          if (Ops.size() == 2) return OuterMul;
1355          if (AddOp < Idx) {
1356            Ops.erase(Ops.begin()+AddOp);
1357            Ops.erase(Ops.begin()+Idx-1);
1358          } else {
1359            Ops.erase(Ops.begin()+Idx);
1360            Ops.erase(Ops.begin()+AddOp-1);
1361          }
1362          Ops.push_back(OuterMul);
1363          return getAddExpr(Ops);
1364        }
1365
1366      // Check this multiply against other multiplies being added together.
1367      for (unsigned OtherMulIdx = Idx+1;
1368           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1369           ++OtherMulIdx) {
1370        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1371        // If MulOp occurs in OtherMul, we can fold the two multiplies
1372        // together.
1373        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1374             OMulOp != e; ++OMulOp)
1375          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1376            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1377            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1378            if (Mul->getNumOperands() != 2) {
1379              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1380                                                  Mul->op_end());
1381              MulOps.erase(MulOps.begin()+MulOp);
1382              InnerMul1 = getMulExpr(MulOps);
1383            }
1384            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1385            if (OtherMul->getNumOperands() != 2) {
1386              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1387                                                  OtherMul->op_end());
1388              MulOps.erase(MulOps.begin()+OMulOp);
1389              InnerMul2 = getMulExpr(MulOps);
1390            }
1391            const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1392            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1393            if (Ops.size() == 2) return OuterMul;
1394            Ops.erase(Ops.begin()+Idx);
1395            Ops.erase(Ops.begin()+OtherMulIdx-1);
1396            Ops.push_back(OuterMul);
1397            return getAddExpr(Ops);
1398          }
1399      }
1400    }
1401  }
1402
1403  // If there are any add recurrences in the operands list, see if any other
1404  // added values are loop invariant.  If so, we can fold them into the
1405  // recurrence.
1406  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1407    ++Idx;
1408
1409  // Scan over all recurrences, trying to fold loop invariants into them.
1410  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1411    // Scan all of the other operands to this add and add them to the vector if
1412    // they are loop invariant w.r.t. the recurrence.
1413    SmallVector<const SCEV *, 8> LIOps;
1414    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1415    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1416      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1417        LIOps.push_back(Ops[i]);
1418        Ops.erase(Ops.begin()+i);
1419        --i; --e;
1420      }
1421
1422    // If we found some loop invariants, fold them into the recurrence.
1423    if (!LIOps.empty()) {
1424      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
1425      LIOps.push_back(AddRec->getStart());
1426
1427      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1428                                             AddRec->op_end());
1429      AddRecOps[0] = getAddExpr(LIOps);
1430
1431      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
1432      // If all of the other operands were loop invariant, we are done.
1433      if (Ops.size() == 1) return NewRec;
1434
1435      // Otherwise, add the folded AddRec to the non-loop-invariant parts.
1436      for (unsigned i = 0;; ++i)
1437        if (Ops[i] == AddRec) {
1438          Ops[i] = NewRec;
1439          break;
1440        }
1441      return getAddExpr(Ops);
1442    }
1443
1444    // Okay, if there weren't any loop invariants to be folded, check to see if
1445    // there are multiple AddRec's with the same loop induction variable being
1446    // added together.  If so, we can fold them.
1447    for (unsigned OtherIdx = Idx+1;
1448         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1449      if (OtherIdx != Idx) {
1450        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1451        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1452          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
1453          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
1454                                              AddRec->op_end());
1455          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
1456            if (i >= NewOps.size()) {
1457              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
1458                            OtherAddRec->op_end());
1459              break;
1460            }
1461            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
1462          }
1463          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
1464
1465          if (Ops.size() == 2) return NewAddRec;
1466
1467          Ops.erase(Ops.begin()+Idx);
1468          Ops.erase(Ops.begin()+OtherIdx-1);
1469          Ops.push_back(NewAddRec);
1470          return getAddExpr(Ops);
1471        }
1472      }
1473
1474    // Otherwise couldn't fold anything into this recurrence.  Move onto the
1475    // next one.
1476  }
1477
1478  // Okay, it looks like we really DO need an add expr.  Check to see if we
1479  // already have one, otherwise create a new one.
1480  FoldingSetNodeID ID;
1481  ID.AddInteger(scAddExpr);
1482  ID.AddInteger(Ops.size());
1483  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1484    ID.AddPointer(Ops[i]);
1485  void *IP = 0;
1486  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1487  SCEV *S = SCEVAllocator.Allocate<SCEVAddExpr>();
1488  new (S) SCEVAddExpr(Ops);
1489  UniqueSCEVs.InsertNode(S, IP);
1490  return S;
1491}
1492
1493
1494/// getMulExpr - Get a canonical multiply expression, or something simpler if
1495/// possible.
1496const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) {
1497  assert(!Ops.empty() && "Cannot get empty mul!");
1498#ifndef NDEBUG
1499  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1500    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1501           getEffectiveSCEVType(Ops[0]->getType()) &&
1502           "SCEVMulExpr operand types don't match!");
1503#endif
1504
1505  // Sort by complexity; this groups all similar expression types together.
1506  GroupByComplexity(Ops, LI);
1507
1508  // If there are any constants, fold them together.
1509  unsigned Idx = 0;
1510  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1511
1512    // C1*(C2+V) -> C1*C2 + C1*V
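        // Distributing the constant exposes more folding: C1*C2 becomes a
        // single constant, and C1*V may combine with other terms.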
1513    if (Ops.size() == 2)
1514      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1515        if (Add->getNumOperands() == 2 &&
1516            isa<SCEVConstant>(Add->getOperand(0)))
1517          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1518                            getMulExpr(LHSC, Add->getOperand(1)));
1519
1520
1521    ++Idx;
1522    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1523      // We found two constants, fold them together!
1524      ConstantInt *Fold = ConstantInt::get(LHSC->getValue()->getValue() *
1525                                           RHSC->getValue()->getValue());
1526      Ops[0] = getConstant(Fold);
1527      Ops.erase(Ops.begin()+1);  // Erase the folded element
1528      if (Ops.size() == 1) return Ops[0];
1529      LHSC = cast<SCEVConstant>(Ops[0]);
1530    }
1531
1532    // If we are left with a constant one being multiplied, strip it off.
1533    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1534      Ops.erase(Ops.begin());
1535      --Idx;
1536    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1537      // If we have a multiply of zero, it will always be zero.
1538      return Ops[0];
1539    }
1540  }
1541
1542  // Skip over the add expression until we get to a multiply.
1543  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1544    ++Idx;
1545
1546  if (Ops.size() == 1)
1547    return Ops[0];
1548
1549  // If there are mul operands inline them all into this expression.
1550  if (Idx < Ops.size()) {
1551    bool DeletedMul = false;
1552    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1553      // If we have a mul, expand the mul operands onto the end of the operands
1554      // list.
1555      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
1556      Ops.erase(Ops.begin()+Idx);
1557      DeletedMul = true;
1558    }
1559
1560    // If we deleted at least one mul, we added operands to the end of the list,
1561    // and they are not necessarily sorted.  Recurse to resort and resimplify
1562    // any operands we just acquired.
1563    if (DeletedMul)
1564      return getMulExpr(Ops);
1565  }
1566
1567  // If there are any add recurrences in the operands list, see if any other
1568  // added values are loop invariant.  If so, we can fold them into the
1569  // recurrence.
1570  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1571    ++Idx;
1572
1573  // Scan over all recurrences, trying to fold loop invariants into them.
1574  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1575    // Scan all of the other operands to this mul and add them to the vector if
1576    // they are loop invariant w.r.t. the recurrence.
1577    SmallVector<const SCEV *, 8> LIOps;
1578    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1579    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1580      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1581        LIOps.push_back(Ops[i]);
1582        Ops.erase(Ops.begin()+i);
1583        --i; --e;
1584      }
1585
1586    // If we found some loop invariants, fold them into the recurrence.
1587    if (!LIOps.empty()) {
1588      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
1589      SmallVector<const SCEV *, 4> NewOps;
1590      NewOps.reserve(AddRec->getNumOperands());
1591      if (LIOps.size() == 1) {
1592        const SCEV *Scale = LIOps[0];
1593        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1594          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1595      } else {
1596        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
1597          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
1598          MulOps.push_back(AddRec->getOperand(i));
1599          NewOps.push_back(getMulExpr(MulOps));
1600        }
1601      }
1602
1603      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
1604
1605      // If all of the other operands were loop invariant, we are done.
1606      if (Ops.size() == 1) return NewRec;
1607
1608      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
1609      for (unsigned i = 0;; ++i)
1610        if (Ops[i] == AddRec) {
1611          Ops[i] = NewRec;
1612          break;
1613        }
1614      return getMulExpr(Ops);
1615    }
1616
1617    // Okay, if there weren't any loop invariants to be folded, check to see if
1618    // there are multiple AddRec's with the same loop induction variable being
1619    // multiplied together.  If so, we can fold them.
1620    for (unsigned OtherIdx = Idx+1;
1621         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1622      if (OtherIdx != Idx) {
1623        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1624        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1625          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
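              // This is the chains-of-recurrences product rule; the step of
              // F*G at iteration n is
              //   F(n+1)*G(n+1) - F(n)*G(n)
              //     = (F(n)+B)*(G(n)+D) - F(n)*G(n)
              //     = F(n)*D + G(n)*B + B*D.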
1626          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1627          const SCEV *NewStart = getMulExpr(F->getStart(),
1628                                            G->getStart());
1629          const SCEV *B = F->getStepRecurrence(*this);
1630          const SCEV *D = G->getStepRecurrence(*this);
1631          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
1632                                           getMulExpr(G, B),
1633                                           getMulExpr(B, D));
1634          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
1635                                                F->getLoop());
1636          if (Ops.size() == 2) return NewAddRec;
1637
1638          Ops.erase(Ops.begin()+Idx);
1639          Ops.erase(Ops.begin()+OtherIdx-1);
1640          Ops.push_back(NewAddRec);
1641          return getMulExpr(Ops);
1642        }
1643      }
1644
1645    // Otherwise couldn't fold anything into this recurrence.  Move onto the
1646    // next one.
1647  }
1648
1649  // Okay, it looks like we really DO need a mul expr.  Check to see if we
1650  // already have one, otherwise create a new one.
1651  FoldingSetNodeID ID;
1652  ID.AddInteger(scMulExpr);
1653  ID.AddInteger(Ops.size());
1654  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1655    ID.AddPointer(Ops[i]);
1656  void *IP = 0;
1657  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1658  SCEV *S = SCEVAllocator.Allocate<SCEVMulExpr>();
1659  new (S) SCEVMulExpr(Ops);
1660  UniqueSCEVs.InsertNode(S, IP);
1661  return S;
1662}
1663
1664/// getUDivExpr - Get a canonical unsigned division expression, or something
1665/// simpler if possible.
1666const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1667                                         const SCEV *RHS) {
1668  assert(getEffectiveSCEVType(LHS->getType()) ==
1669         getEffectiveSCEVType(RHS->getType()) &&
1670         "SCEVUDivExpr operand types don't match!");
1671
1672  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1673    if (RHSC->getValue()->equalsInt(1))
1674      return LHS;                            // X udiv 1 --> X
1675    if (RHSC->isZero())
1676      return getIntegerSCEV(0, LHS->getType()); // value is undefined
1677
1678    // Determine if the division can be folded into the operands of
1679    // the dividend.
1680    // TODO: Generalize this to non-constants by using known-bits information.
1681    const Type *Ty = LHS->getType();
1682    unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1683    unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1684    // For non-power-of-two values, effectively round the value up to the
1685    // nearest power of two.
1686    if (!RHSC->getValue()->getValue().isPowerOf2())
1687      ++MaxShiftAmt;
1688    const IntegerType *ExtTy =
1689      IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
1690    // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
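        // Here "safe" means the addrec does not wrap: we check that
        // zero-extending the whole addrec into a wider type gives the same
        // result as zero-extending its start and step individually, and
        // "N/C can be folded" requires the step to divide evenly.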
1691    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1692      if (const SCEVConstant *Step =
1693            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1694        if (!Step->getValue()->getValue()
1695              .urem(RHSC->getValue()->getValue()) &&
1696            getZeroExtendExpr(AR, ExtTy) ==
1697            getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1698                          getZeroExtendExpr(Step, ExtTy),
1699                          AR->getLoop())) {
1700          SmallVector<const SCEV *, 4> Operands;
1701          for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1702            Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1703          return getAddRecExpr(Operands, AR->getLoop());
1704        }
1705    // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1706    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1707      SmallVector<const SCEV *, 4> Operands;
1708      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1709        Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1710      if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1711        // Find an operand that's safely divisible.
1712        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1713          const SCEV *Op = M->getOperand(i);
1714          const SCEV *Div = getUDivExpr(Op, RHSC);
1715          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1716            const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
1717            Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
1718                                                    MOperands.end());
1719            Operands[i] = Div;
1720            return getMulExpr(Operands);
1721          }
1722        }
1723    }
1724    // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1725    if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
1726      SmallVector<const SCEV *, 4> Operands;
1727      for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1728        Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1729      if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1730        Operands.clear();
1731        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1732          const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1733          if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1734            break;
1735          Operands.push_back(Op);
1736        }
1737        if (Operands.size() == A->getNumOperands())
1738          return getAddExpr(Operands);
1739      }
1740    }
1741
1742    // Fold if both operands are constant.
1743    if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1744      Constant *LHSCV = LHSC->getValue();
1745      Constant *RHSCV = RHSC->getValue();
1746      return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
1747                                                                 RHSCV)));
1748    }
1749  }
1750
1751  FoldingSetNodeID ID;
1752  ID.AddInteger(scUDivExpr);
1753  ID.AddPointer(LHS);
1754  ID.AddPointer(RHS);
1755  void *IP = 0;
1756  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1757  SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
1758  new (S) SCEVUDivExpr(LHS, RHS);
1759  UniqueSCEVs.InsertNode(S, IP);
1760  return S;
1761}
1762
1763
1764/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1765/// Simplify the expression as much as possible.
1766const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
1767                               const SCEV *Step, const Loop *L) {
1768  SmallVector<const SCEV *, 4> Operands;
1769  Operands.push_back(Start);
1770  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1771    if (StepChrec->getLoop() == L) {
1772      Operands.insert(Operands.end(), StepChrec->op_begin(),
1773                      StepChrec->op_end());
1774      return getAddRecExpr(Operands, L);
1775    }
1776
1777  Operands.push_back(Step);
1778  return getAddRecExpr(Operands, L);
1779}
1780
1781/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1782/// Simplify the expression as much as possible.
1783const SCEV *
1784ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
1785                               const Loop *L) {
1786  if (Operands.size() == 1) return Operands[0];
1787#ifndef NDEBUG
1788  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1789    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1790           getEffectiveSCEVType(Operands[0]->getType()) &&
1791           "SCEVAddRecExpr operand types don't match!");
1792#endif
1793
1794  if (Operands.back()->isZero()) {
1795    Operands.pop_back();
1796    return getAddRecExpr(Operands, L);             // {X,+,0}  -->  X
1797  }
1798
1799  // Canonicalize nested AddRecs by nesting them in order of loop depth.
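      // For example, {{A,+,B}<inner>,+,C}<outer>, where <inner> is nested
      // more deeply than <outer>, becomes {{A,+,C}<outer>,+,B}<inner> when
      // the loop-invariance checks below succeed.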
1800  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
1801    const Loop* NestedLoop = NestedAR->getLoop();
1802    if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
1803      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
1804                                                  NestedAR->op_end());
1805      Operands[0] = NestedAR->getStart();
1806      // AddRecs require their operands be loop-invariant with respect to their
1807      // loops. Don't perform this transformation if it would break this
1808      // requirement.
1809      bool AllInvariant = true;
1810      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1811        if (!Operands[i]->isLoopInvariant(L)) {
1812          AllInvariant = false;
1813          break;
1814        }
1815      if (AllInvariant) {
1816        NestedOperands[0] = getAddRecExpr(Operands, L);
1817        AllInvariant = true;
1818        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
1819          if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
1820            AllInvariant = false;
1821            break;
1822          }
1823        if (AllInvariant)
1824          // Ok, both add recurrences are valid after the transformation.
1825          return getAddRecExpr(NestedOperands, NestedLoop);
1826      }
1827      // Reset Operands to its original state.
1828      Operands[0] = NestedAR;
1829    }
1830  }
1831
1832  FoldingSetNodeID ID;
1833  ID.AddInteger(scAddRecExpr);
1834  ID.AddInteger(Operands.size());
1835  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1836    ID.AddPointer(Operands[i]);
1837  ID.AddPointer(L);
1838  void *IP = 0;
1839  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1840  SCEV *S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
1841  new (S) SCEVAddRecExpr(Operands, L);
1842  UniqueSCEVs.InsertNode(S, IP);
1843  return S;
1844}
1845
1846const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
1847                                         const SCEV *RHS) {
1848  SmallVector<const SCEV *, 2> Ops;
1849  Ops.push_back(LHS);
1850  Ops.push_back(RHS);
1851  return getSMaxExpr(Ops);
1852}
1853
1854const SCEV *
1855ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1856  assert(!Ops.empty() && "Cannot get empty smax!");
1857  if (Ops.size() == 1) return Ops[0];
1858#ifndef NDEBUG
1859  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1860    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1861           getEffectiveSCEVType(Ops[0]->getType()) &&
1862           "SCEVSMaxExpr operand types don't match!");
1863#endif
1864
1865  // Sort by complexity; this groups all similar expression types together.
1866  GroupByComplexity(Ops, LI);
1867
1868  // If there are any constants, fold them together.
1869  unsigned Idx = 0;
1870  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1871    ++Idx;
1872    assert(Idx < Ops.size());
1873    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1874      // We found two constants, fold them together!
1875      ConstantInt *Fold = ConstantInt::get(
1876                              APIntOps::smax(LHSC->getValue()->getValue(),
1877                                             RHSC->getValue()->getValue()));
1878      Ops[0] = getConstant(Fold);
1879      Ops.erase(Ops.begin()+1);  // Erase the folded element
1880      if (Ops.size() == 1) return Ops[0];
1881      LHSC = cast<SCEVConstant>(Ops[0]);
1882    }
1883
1884    // If we are left with a constant minimum-int, strip it off.
1885    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
1886      Ops.erase(Ops.begin());
1887      --Idx;
1888    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
1889      // If we have an smax with a constant maximum-int, it will always be
1890      // maximum-int.
1891      return Ops[0];
1892    }
1893  }
1894
1895  if (Ops.size() == 1) return Ops[0];
1896
1897  // Find the first SMax
1898  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
1899    ++Idx;
1900
1901  // Check to see if one of the operands is an SMax. If so, expand its operands
1902  // onto our operand list, and recurse to simplify.
1903  if (Idx < Ops.size()) {
1904    bool DeletedSMax = false;
1905    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
1906      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
1907      Ops.erase(Ops.begin()+Idx);
1908      DeletedSMax = true;
1909    }
1910
1911    if (DeletedSMax)
1912      return getSMaxExpr(Ops);
1913  }
1914
1915  // Okay, check to see if the same value occurs in the operand list twice.  If
1916  // so, delete one.  Since we sorted the list, these values are required to
1917  // be adjacent.
1918  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1919    if (Ops[i] == Ops[i+1]) {      //  X smax Y smax Y  -->  X smax Y
1920      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1921      --i; --e;
1922    }
1923
1924  if (Ops.size() == 1) return Ops[0];
1925
1926  assert(!Ops.empty() && "Reduced smax down to nothing!");
1927
1928  // Okay, it looks like we really DO need an smax expr.  Check to see if we
1929  // already have one, otherwise create a new one.
1930  FoldingSetNodeID ID;
1931  ID.AddInteger(scSMaxExpr);
1932  ID.AddInteger(Ops.size());
1933  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1934    ID.AddPointer(Ops[i]);
1935  void *IP = 0;
1936  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1937  SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
1938  new (S) SCEVSMaxExpr(Ops);
1939  UniqueSCEVs.InsertNode(S, IP);
1940  return S;
1941}
1942
1943const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
1944                                         const SCEV *RHS) {
1945  SmallVector<const SCEV *, 2> Ops;
1946  Ops.push_back(LHS);
1947  Ops.push_back(RHS);
1948  return getUMaxExpr(Ops);
1949}
1950
1951const SCEV *
1952ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1953  assert(!Ops.empty() && "Cannot get empty umax!");
1954  if (Ops.size() == 1) return Ops[0];
1955#ifndef NDEBUG
1956  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1957    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1958           getEffectiveSCEVType(Ops[0]->getType()) &&
1959           "SCEVUMaxExpr operand types don't match!");
1960#endif
1961
1962  // Sort by complexity; this groups all similar expression types together.
1963  GroupByComplexity(Ops, LI);
1964
1965  // If there are any constants, fold them together.
1966  unsigned Idx = 0;
1967  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1968    ++Idx;
1969    assert(Idx < Ops.size());
1970    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1971      // We found two constants, fold them together!
1972      ConstantInt *Fold = ConstantInt::get(
1973                              APIntOps::umax(LHSC->getValue()->getValue(),
1974                                             RHSC->getValue()->getValue()));
1975      Ops[0] = getConstant(Fold);
1976      Ops.erase(Ops.begin()+1);  // Erase the folded element
1977      if (Ops.size() == 1) return Ops[0];
1978      LHSC = cast<SCEVConstant>(Ops[0]);
1979    }
1980
1981    // If we are left with a constant minimum-int, strip it off.
1982    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
1983      Ops.erase(Ops.begin());
1984      --Idx;
1985    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
1986      // If we have an umax with a constant maximum-int, it will always be
1987      // maximum-int.
1988      return Ops[0];
1989    }
1990  }
1991
1992  if (Ops.size() == 1) return Ops[0];
1993
1994  // Find the first UMax
1995  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
1996    ++Idx;
1997
1998  // Check to see if one of the operands is a UMax. If so, expand its operands
1999  // onto our operand list, and recurse to simplify.
2000  if (Idx < Ops.size()) {
2001    bool DeletedUMax = false;
2002    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
2003      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
2004      Ops.erase(Ops.begin()+Idx);
2005      DeletedUMax = true;
2006    }
2007
2008    if (DeletedUMax)
2009      return getUMaxExpr(Ops);
2010  }
2011
2012  // Okay, check to see if the same value occurs in the operand list twice.  If
2013  // so, delete one.  Since we sorted the list, these values are required to
2014  // be adjacent.
2015  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2016    if (Ops[i] == Ops[i+1]) {      //  X umax Y umax Y  -->  X umax Y
2017      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2018      --i; --e;
2019    }
2020
2021  if (Ops.size() == 1) return Ops[0];
2022
2023  assert(!Ops.empty() && "Reduced umax down to nothing!");
2024
2025  // Okay, it looks like we really DO need a umax expr.  Check to see if we
2026  // already have one, otherwise create a new one.
2027  FoldingSetNodeID ID;
2028  ID.AddInteger(scUMaxExpr);
2029  ID.AddInteger(Ops.size());
2030  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2031    ID.AddPointer(Ops[i]);
2032  void *IP = 0;
2033  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2034  SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
2035  new (S) SCEVUMaxExpr(Ops);
2036  UniqueSCEVs.InsertNode(S, IP);
2037  return S;
2038}
2039
2040const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
2041                                         const SCEV *RHS) {
2042  // ~smax(~x, ~y) == smin(x, y).
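      // This holds because ~z = -1 - z reverses the signed order, so the
      // complement of the smax of the complements is the smin.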
2043  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2044}
2045
2046const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2047                                         const SCEV *RHS) {
2048  // ~umax(~x, ~y) == umin(x, y)
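      // The same complement trick as in getSMinExpr, applied to the
      // unsigned order.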
2049  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2050}
2051
2052const SCEV *ScalarEvolution::getUnknown(Value *V) {
2053  // Don't attempt to do anything other than create a SCEVUnknown object
2054  // here.  createSCEV only calls getUnknown after checking for all other
2055  // interesting possibilities, and any other code that calls getUnknown
2056  // is doing so in order to hide a value from SCEV canonicalization.
2057
2058  FoldingSetNodeID ID;
2059  ID.AddInteger(scUnknown);
2060  ID.AddPointer(V);
2061  void *IP = 0;
2062  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2063  SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
2064  new (S) SCEVUnknown(V);
2065  UniqueSCEVs.InsertNode(S, IP);
2066  return S;
2067}
2068
2069//===----------------------------------------------------------------------===//
2070//            Basic SCEV Analysis and PHI Idiom Recognition Code
2071//
2072
2073/// isSCEVable - Test if values of the given type are analyzable within
2074/// the SCEV framework. This primarily includes integer types, and it
2075/// can optionally include pointer types if the ScalarEvolution class
2076/// has access to target-specific information.
2077bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2078  // Integers are always SCEVable.
2079  if (Ty->isInteger())
2080    return true;
2081
2082  // Pointers are SCEVable if TargetData information is available
2083  // to provide pointer size information.
2084  if (isa<PointerType>(Ty))
2085    return TD != NULL;
2086
2087  // Otherwise it's not SCEVable.
2088  return false;
2089}
2090
2091/// getTypeSizeInBits - Return the size in bits of the specified type,
2092/// for which isSCEVable must return true.
2093uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2094  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2095
2096  // If we have a TargetData, use it!
2097  if (TD)
2098    return TD->getTypeSizeInBits(Ty);
2099
2100  // Otherwise, we support only integer types.
2101  assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
2102  return Ty->getPrimitiveSizeInBits();
2103}
2104
2105/// getEffectiveSCEVType - Return a type with the same bitwidth as
2106/// the given type and which represents how SCEV will treat the given
2107/// type, for which isSCEVable must return true. For pointer types,
2108/// this is the pointer-sized integer type.
2109const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2110  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2111
2112  if (Ty->isInteger())
2113    return Ty;
2114
2115  assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
2116  return TD->getIntPtrType();
2117}
2118
2119const SCEV *ScalarEvolution::getCouldNotCompute() {
2120  return &CouldNotCompute;
2121}
2122
2123/// hasSCEV - Return true if the SCEV for this value has already been
2124/// computed.
2125bool ScalarEvolution::hasSCEV(Value *V) const {
2126  return Scalars.count(V);
2127}
2128
2129/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2130/// expression and create a new one.
2131const SCEV *ScalarEvolution::getSCEV(Value *V) {
2132  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2133
2134  std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
2135  if (I != Scalars.end()) return I->second;
2136  const SCEV *S = createSCEV(V);
2137  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2138  return S;
2139}
2140
2141/// getIntegerSCEV - Given a SCEVable type, create a constant for the
2142/// specified signed integer value and return a SCEV for the constant.
2143const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
2144  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
2145  return getConstant(ConstantInt::get(ITy, Val));
2146}
2147
2148/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2149///
2150const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2151  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2152    return getConstant(cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2153
2154  const Type *Ty = V->getType();
2155  Ty = getEffectiveSCEVType(Ty);
2156  return getMulExpr(V, getConstant(ConstantInt::getAllOnesValue(Ty)));
2157}
2158
2159/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2160const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2161  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2162    return getConstant(cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2163
2164  const Type *Ty = V->getType();
2165  Ty = getEffectiveSCEVType(Ty);
2166  const SCEV *AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty));
2167  return getMinusSCEV(AllOnes, V);
2168}
2169
2170/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2171///
2172const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2173                                          const SCEV *RHS) {
2174  // X - Y --> X + -Y
2175  return getAddExpr(LHS, getNegativeSCEV(RHS));
2176}
2177
2178/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2179/// input value to the specified type.  If the type must be extended, it is zero
2180/// extended.
2181const SCEV *
2182ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2183                                         const Type *Ty) {
2184  const Type *SrcTy = V->getType();
2185  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2186         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2187         "Cannot truncate or zero extend with non-integer arguments!");
2188  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2189    return V;  // No conversion
2190  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2191    return getTruncateExpr(V, Ty);
2192  return getZeroExtendExpr(V, Ty);
2193}
2194
2195/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2196/// input value to the specified type.  If the type must be extended, it is sign
2197/// extended.
2198const SCEV *
2199ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2200                                         const Type *Ty) {
2201  const Type *SrcTy = V->getType();
2202  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2203         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2204         "Cannot truncate or zero extend with non-integer arguments!");
2205  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2206    return V;  // No conversion
2207  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2208    return getTruncateExpr(V, Ty);
2209  return getSignExtendExpr(V, Ty);
2210}
2211
2212/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2213/// input value to the specified type.  If the type must be extended, it is zero
2214/// extended.  The conversion must not be narrowing.
2215const SCEV *
2216ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2217  const Type *SrcTy = V->getType();
2218  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2219         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2220         "Cannot noop or zero extend with non-integer arguments!");
2221  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2222         "getNoopOrZeroExtend cannot truncate!");
2223  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2224    return V;  // No conversion
2225  return getZeroExtendExpr(V, Ty);
2226}
2227
2228/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2229/// input value to the specified type.  If the type must be extended, it is sign
2230/// extended.  The conversion must not be narrowing.
2231const SCEV *
2232ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2233  const Type *SrcTy = V->getType();
2234  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2235         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2236         "Cannot noop or sign extend with non-integer arguments!");
2237  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2238         "getNoopOrSignExtend cannot truncate!");
2239  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2240    return V;  // No conversion
2241  return getSignExtendExpr(V, Ty);
2242}
2243
2244/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2245/// the input value to the specified type. If the type must be extended,
2246/// it is extended with unspecified bits. The conversion must not be
2247/// narrowing.
2248const SCEV *
2249ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2250  const Type *SrcTy = V->getType();
2251  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2252         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2253         "Cannot noop or any extend with non-integer arguments!");
2254  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2255         "getNoopOrAnyExtend cannot truncate!");
2256  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2257    return V;  // No conversion
2258  return getAnyExtendExpr(V, Ty);
2259}
2260
2261/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2262/// input value to the specified type.  The conversion must not be widening.
2263const SCEV *
2264ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2265  const Type *SrcTy = V->getType();
2266  assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2267         (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2268         "Cannot truncate or noop with non-integer arguments!");
2269  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2270         "getTruncateOrNoop cannot extend!");
2271  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2272    return V;  // No conversion
2273  return getTruncateExpr(V, Ty);
2274}
2275
2276/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2277/// the types using zero-extension, and then perform a umax operation
2278/// with them.
2279const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2280                                                        const SCEV *RHS) {
2281  const SCEV *PromotedLHS = LHS;
2282  const SCEV *PromotedRHS = RHS;
2283
2284  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2285    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2286  else
2287    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2288
2289  return getUMaxExpr(PromotedLHS, PromotedRHS);
2290}
2291
2292/// getUMinFromMismatchedTypes - Promote the operands to the wider of
2293/// the types using zero-extension, and then perform a umin operation
2294/// with them.
2295const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2296                                                        const SCEV *RHS) {
2297  const SCEV *PromotedLHS = LHS;
2298  const SCEV *PromotedRHS = RHS;
2299
2300  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2301    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2302  else
2303    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2304
2305  return getUMinExpr(PromotedLHS, PromotedRHS);
2306}
2307
2308/// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value for
2309/// the specified instruction and replaces any references to the symbolic value
2310/// SymName with the specified value.  This is used during PHI resolution.
2311void
2312ScalarEvolution::ReplaceSymbolicValueWithConcrete(Instruction *I,
2313                                                  const SCEV *SymName,
2314                                                  const SCEV *NewVal) {
2315  std::map<SCEVCallbackVH, const SCEV *>::iterator SI =
2316    Scalars.find(SCEVCallbackVH(I, this));
2317  if (SI == Scalars.end()) return;
2318
2319  const SCEV *NV =
2320    SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this);
2321  if (NV == SI->second) return;  // No change.
2322
2323  SI->second = NV;       // Update the scalars map!
2324
2325  // Any instruction values that use this instruction might also need to be
2326  // updated!
2327  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
2328       UI != E; ++UI)
2329    ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal);
2330}
2331
2332/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2333/// a loop header, making it a potential recurrence, or it doesn't.
2334///
2335const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2336  if (PN->getNumIncomingValues() == 2)  // The loops have been canonicalized.
2337    if (const Loop *L = LI->getLoopFor(PN->getParent()))
2338      if (L->getHeader() == PN->getParent()) {
2339        // If it lives in the loop header, it has two incoming values, one
2340        // from outside the loop, and one from inside.
2341        unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2342        unsigned BackEdge     = IncomingEdge^1;
2343
2344        // While we are analyzing this PHI node, handle its value symbolically.
2345        const SCEV *SymbolicName = getUnknown(PN);
2346        assert(Scalars.find(PN) == Scalars.end() &&
2347               "PHI node already processed?");
2348        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2349
2350        // Using this symbolic name for the PHI, analyze the value coming around
2351        // the back-edge.
2352        const SCEV *BEValue = getSCEV(PN->getIncomingValue(BackEdge));
2353
2354        // NOTE: If BEValue is loop invariant, we know that the PHI node just
2355        // has a special value for the first iteration of the loop.
2356
2357        // If the value coming around the backedge is an add with the symbolic
2358        // value we just inserted, then we found a simple induction variable!
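            // For example (names illustrative), given
            //   %i = phi [ 0, %entry ], [ %i.next, %latch ]
            //   %i.next = add %i, 4
            // the back-edge value is (SymbolicName + 4), so the PHI becomes
            // the recurrence {0,+,4}.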
2359        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2360          // If there is a single occurrence of the symbolic value, replace it
2361          // with a recurrence.
2362          unsigned FoundIndex = Add->getNumOperands();
2363          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2364            if (Add->getOperand(i) == SymbolicName)
2365              if (FoundIndex == e) {
2366                FoundIndex = i;
2367                break;
2368              }
2369
2370          if (FoundIndex != Add->getNumOperands()) {
2371            // Create an add with everything but the specified operand.
2372            SmallVector<const SCEV *, 8> Ops;
2373            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2374              if (i != FoundIndex)
2375                Ops.push_back(Add->getOperand(i));
2376            const SCEV *Accum = getAddExpr(Ops);
2377
2378            // This is not a valid addrec if the step amount is varying each
2379            // loop iteration, but is not itself an addrec in this loop.
2380            if (Accum->isLoopInvariant(L) ||
2381                (isa<SCEVAddRecExpr>(Accum) &&
2382                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2383              const SCEV *StartVal =
2384                getSCEV(PN->getIncomingValue(IncomingEdge));
2385              const SCEV *PHISCEV =
2386                getAddRecExpr(StartVal, Accum, L);
2387
2388              // Okay, for the entire analysis of this edge we assumed the PHI
2389              // to be symbolic.  We now need to go back and update all of the
2390              // entries for the scalars that use the PHI (except for the PHI
2391              // itself) to use the new analyzed value instead of the "symbolic"
2392              // value.
2393              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2394              return PHISCEV;
2395            }
2396          }
2397        } else if (const SCEVAddRecExpr *AddRec =
2398                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
2399          // Otherwise, this could be a loop like this:
2400          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2401          // In this case, j = {1,+,1}  and BEValue is j.
2402          // Because the other in-value of i (0) fits the evolution of BEValue,
2403          // i really is an addrec evolution.
2404          if (AddRec->getLoop() == L && AddRec->isAffine()) {
2405            const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2406
2407            // If StartVal = j.start - j.stride, we can use StartVal as the
2408            // initial value of the addrec evolution.
2409            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2410                                            AddRec->getOperand(1))) {
2411              const SCEV *PHISCEV =
2412                 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2413
2414              // Okay, for the entire analysis of this edge we assumed the PHI
2415              // to be symbolic.  We now need to go back and update all of the
2416              // entries for the scalars that use the PHI (except for the PHI
2417              // itself) to use the new analyzed value instead of the "symbolic"
2418              // value.
2419              ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2420              return PHISCEV;
2421            }
2422          }
2423        }
2424
2425        return SymbolicName;
2426      }
2427
2428  // If it's not a loop phi, we can't handle it yet.
2429  return getUnknown(PN);
2430}
2431
2432/// createNodeForGEP - Expand GEP instructions into add and multiply
2433/// operations. This allows them to be analyzed by regular SCEV code.
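    /// For example (names illustrative), since i32 has a 4-byte alloc size
    /// and each [10 x i32] row occupies 40 bytes,
    ///   getelementptr [10 x i32]* %p, i64 %i, i64 %j
    /// is analyzed as %p + 40*%i + 4*%j.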
2434///
2435const SCEV *ScalarEvolution::createNodeForGEP(User *GEP) {
2436
2437  const Type *IntPtrTy = TD->getIntPtrType();
2438  Value *Base = GEP->getOperand(0);
2439  // Don't attempt to analyze GEPs over unsized objects.
2440  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2441    return getUnknown(GEP);
2442  const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
2443  gep_type_iterator GTI = gep_type_begin(GEP);
2444  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2445                                      E = GEP->op_end();
2446       I != E; ++I) {
2447    Value *Index = *I;
2448    // Compute the (potentially symbolic) offset in bytes for this index.
2449    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2450      // For a struct, add the member offset.
2451      const StructLayout &SL = *TD->getStructLayout(STy);
2452      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2453      uint64_t Offset = SL.getElementOffset(FieldNo);
2454      TotalOffset = getAddExpr(TotalOffset, getIntegerSCEV(Offset, IntPtrTy));
2455    } else {
2456      // For an array, add the element offset, explicitly scaled.
2457      const SCEV *LocalOffset = getSCEV(Index);
2458      if (!isa<PointerType>(LocalOffset->getType()))
2459        // Getelementptr indices are signed.
2460        LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
2461      LocalOffset =
2462        getMulExpr(LocalOffset,
2463                   getIntegerSCEV(TD->getTypeAllocSize(*GTI), IntPtrTy));
2464      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2465    }
2466  }
2467  return getAddExpr(getSCEV(Base), TotalOffset);
2468}
2469
2470/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2471/// guaranteed to end in (at every loop iteration).  It is, at the same time,
2472/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2473/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
2474uint32_t
2475ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2476  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2477    return C->getValue()->getValue().countTrailingZeros();
2478
2479  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2480    return std::min(GetMinTrailingZeros(T->getOperand()),
2481                    (uint32_t)getTypeSizeInBits(T->getType()));
2482
2483  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2484    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2485    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2486             getTypeSizeInBits(E->getType()) : OpRes;
2487  }
2488
2489  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2490    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2491    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2492             getTypeSizeInBits(E->getType()) : OpRes;
2493  }
2494
2495  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2496    // The result is the min of all operands' results.
2497    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2498    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2499      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2500    return MinOpRes;
2501  }
2502
2503  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2504    // The result is the sum of all operands' results.
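        // For example, 4*X has at least 2 plus X's trailing-zero count,
        // capped at the bitwidth.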
2505    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2506    uint32_t BitWidth = getTypeSizeInBits(M->getType());
2507    for (unsigned i = 1, e = M->getNumOperands();
2508         SumOpRes != BitWidth && i != e; ++i)
2509      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2510                          BitWidth);
2511    return SumOpRes;
2512  }
2513
2514  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2515    // The result is the min of all operands' results.
2516    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2517    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2518      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2519    return MinOpRes;
2520  }
2521
2522  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2523    // The result is the min of all operands' results.
2524    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2525    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2526      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2527    return MinOpRes;
2528  }
2529
2530  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2531    // The result is the min of all operands' results.
2532    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2533    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2534      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2535    return MinOpRes;
2536  }
2537
2538  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2539    // For a SCEVUnknown, ask ValueTracking.
2540    unsigned BitWidth = getTypeSizeInBits(U->getType());
2541    APInt Mask = APInt::getAllOnesValue(BitWidth);
2542    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2543    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2544    return Zeros.countTrailingOnes();
2545  }
2546
2547  // SCEVUDivExpr: nothing useful is known about the result; be conservative.
2548  return 0;
2549}
2550
2551/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
2552///
2553ConstantRange
2554ScalarEvolution::getUnsignedRange(const SCEV *S) {
2555
2556  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2557    return ConstantRange(C->getValue()->getValue());
2558
2559  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2560    ConstantRange X = getUnsignedRange(Add->getOperand(0));
2561    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2562      X = X.add(getUnsignedRange(Add->getOperand(i)));
2563    return X;
2564  }
2565
2566  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2567    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
2568    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2569      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
2570    return X;
2571  }
2572
2573  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2574    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
2575    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2576      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
2577    return X;
2578  }
2579
2580  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2581    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
2582    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2583      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
2584    return X;
2585  }
2586
2587  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2588    ConstantRange X = getUnsignedRange(UDiv->getLHS());
2589    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
2590    return X.udiv(Y);
2591  }
2592
2593  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2594    ConstantRange X = getUnsignedRange(ZExt->getOperand());
2595    return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
2596  }
2597
2598  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2599    ConstantRange X = getUnsignedRange(SExt->getOperand());
2600    return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
2601  }
2602
2603  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2604    ConstantRange X = getUnsignedRange(Trunc->getOperand());
2605    return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
2606  }
2607
2608  ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
2609
2610  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2611    const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
2612    const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
2613    if (!Trip) return FullSet;
2614
2615    // TODO: non-affine addrec
2616    if (AddRec->isAffine()) {
2617      const Type *Ty = AddRec->getType();
2618      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2619      if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
2620        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2621
2622        const SCEV *Start = AddRec->getStart();
2623        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2624
2625        // Check for overflow.
2626        if (!isKnownPredicate(ICmpInst::ICMP_ULE, Start, End))
2627          return FullSet;
2628
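            // The ULE check above implies the recurrence does not wrap in the
            // unsigned sense, so every value it takes is assumed to lie
            // between the start value and the value at the final iteration;
            // the union of those two ranges then bounds the whole AddRec.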
2629        ConstantRange StartRange = getUnsignedRange(Start);
2630        ConstantRange EndRange = getUnsignedRange(End);
2631        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
2632                                   EndRange.getUnsignedMin());
2633        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
2634                                   EndRange.getUnsignedMax());
2635        if (Min.isMinValue() && Max.isMaxValue())
2636          return ConstantRange(Min.getBitWidth(), /*isFullSet=*/true);
2637        return ConstantRange(Min, Max+1);
2638      }
2639    }
2640  }
2641
2642  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2643    // For a SCEVUnknown, ask ValueTracking.
2644    unsigned BitWidth = getTypeSizeInBits(U->getType());
2645    APInt Mask = APInt::getAllOnesValue(BitWidth);
2646    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2647    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2648    return ConstantRange(Ones, ~Zeros);
2649  }
2650
2651  return FullSet;
2652}
2653
2654/// getSignedRange - Determine the signed range for a particular SCEV.
2655///
2656ConstantRange
2657ScalarEvolution::getSignedRange(const SCEV *S) {
2658
2659  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2660    return ConstantRange(C->getValue()->getValue());
2661
2662  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2663    ConstantRange X = getSignedRange(Add->getOperand(0));
2664    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2665      X = X.add(getSignedRange(Add->getOperand(i)));
2666    return X;
2667  }
2668
2669  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2670    ConstantRange X = getSignedRange(Mul->getOperand(0));
2671    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2672      X = X.multiply(getSignedRange(Mul->getOperand(i)));
2673    return X;
2674  }
2675
2676  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2677    ConstantRange X = getSignedRange(SMax->getOperand(0));
2678    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2679      X = X.smax(getSignedRange(SMax->getOperand(i)));
2680    return X;
2681  }
2682
2683  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2684    ConstantRange X = getSignedRange(UMax->getOperand(0));
2685    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2686      X = X.umax(getSignedRange(UMax->getOperand(i)));
2687    return X;
2688  }
2689
2690  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2691    ConstantRange X = getSignedRange(UDiv->getLHS());
2692    ConstantRange Y = getSignedRange(UDiv->getRHS());
2693    return X.udiv(Y);
2694  }
2695
2696  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2697    ConstantRange X = getSignedRange(ZExt->getOperand());
2698    return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
2699  }
2700
2701  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2702    ConstantRange X = getSignedRange(SExt->getOperand());
2703    return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
2704  }
2705
2706  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2707    ConstantRange X = getSignedRange(Trunc->getOperand());
2708    return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
2709  }
2710
2711  ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
2712
2713  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2714    const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
2715    const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
2716    if (!Trip) return FullSet;
2717
2718    // TODO: non-affine addrec
2719    if (AddRec->isAffine()) {
2720      const Type *Ty = AddRec->getType();
2721      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2722      if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
2723        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2724
2725        const SCEV *Start = AddRec->getStart();
2726        const SCEV *Step = AddRec->getStepRecurrence(*this);
2727        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2728
2729        // Check for overflow.
2730        if (!(isKnownPositive(Step) &&
2731              isKnownPredicate(ICmpInst::ICMP_SLT, Start, End)) &&
2732            !(isKnownNegative(Step) &&
2733              isKnownPredicate(ICmpInst::ICMP_SGT, Start, End)))
2734          return FullSet;
2735
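            // A step of known sign plus the start/end comparison above means
            // the recurrence moves monotonically from Start toward End, so
            // the ranges of the two endpoints bracket every value it takes.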
2736        ConstantRange StartRange = getSignedRange(Start);
2737        ConstantRange EndRange = getSignedRange(End);
2738        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
2739                                   EndRange.getSignedMin());
2740        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
2741                                   EndRange.getSignedMax());
2742        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
2743          return ConstantRange(Min.getBitWidth(), /*isFullSet=*/true);
2744        return ConstantRange(Min, Max+1);
2745      }
2746    }
2747  }
2748
2749  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2750    // For a SCEVUnknown, ask ValueTracking.
2751    unsigned BitWidth = getTypeSizeInBits(U->getType());
2752    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
2753    if (NS == 1)
2754      return FullSet;
2755    return
2756      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
2757                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1);
2758  }
2759
2760  return FullSet;
2761}
2762
2763/// createSCEV - We know that there is no SCEV for the specified value.
2764/// Analyze the expression.
2765///
2766const SCEV *ScalarEvolution::createSCEV(Value *V) {
2767  if (!isSCEVable(V->getType()))
2768    return getUnknown(V);
2769
2770  unsigned Opcode = Instruction::UserOp1;
2771  if (Instruction *I = dyn_cast<Instruction>(V))
2772    Opcode = I->getOpcode();
2773  else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
2774    Opcode = CE->getOpcode();
2775  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
2776    return getConstant(CI);
2777  else if (isa<ConstantPointerNull>(V))
2778    return getIntegerSCEV(0, V->getType());
2779  else if (isa<UndefValue>(V))
2780    return getIntegerSCEV(0, V->getType());
2781  else
2782    return getUnknown(V);
2783
2784  User *U = cast<User>(V);
2785  switch (Opcode) {
2786  case Instruction::Add:
2787    return getAddExpr(getSCEV(U->getOperand(0)),
2788                      getSCEV(U->getOperand(1)));
2789  case Instruction::Mul:
2790    return getMulExpr(getSCEV(U->getOperand(0)),
2791                      getSCEV(U->getOperand(1)));
2792  case Instruction::UDiv:
2793    return getUDivExpr(getSCEV(U->getOperand(0)),
2794                       getSCEV(U->getOperand(1)));
2795  case Instruction::Sub:
2796    return getMinusSCEV(getSCEV(U->getOperand(0)),
2797                        getSCEV(U->getOperand(1)));
2798  case Instruction::And:
2799    // For an expression like x&255 that merely masks off the high bits,
2800    // use zext(trunc(x)) as the SCEV expression.
2801    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2802      if (CI->isNullValue())
2803        return getSCEV(U->getOperand(1));
2804      if (CI->isAllOnesValue())
2805        return getSCEV(U->getOperand(0));
2806      const APInt &A = CI->getValue();
2807
2808      // Instcombine's ShrinkDemandedConstant may strip bits out of
2809      // constants, obscuring what would otherwise be a low-bits mask.
2810      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
2811      // knew about to reconstruct a low-bits mask value.
2812      unsigned LZ = A.countLeadingZeros();
2813      unsigned BitWidth = A.getBitWidth();
2814      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
2815      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2816      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
2817
2818      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
2819
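          // If every bit below A's leading zeros is either set in A or known
          // zero in the operand, the 'and' acts exactly like a low-bits mask
          // of width BitWidth-LZ: e.g., x & 0xF8 is equivalent to x & 0xFF
          // when the low three bits of x are known to be zero.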
2820      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
2821        return
2822          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
2823                                            IntegerType::get(BitWidth - LZ)),
2824                            U->getType());
2825    }
2826    break;
2827
2828  case Instruction::Or:
2829    // If the RHS of the Or is a constant, we may have something like:
2830    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
2831    // optimizations will transparently handle this case.
2832    //
2833    // In order for this transformation to be safe, the LHS must be of the
2834    // form X*(2^n) and the Or constant must be less than 2^n.
2835    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2836      const SCEV *LHS = getSCEV(U->getOperand(0));
2837      const APInt &CIVal = CI->getValue();
2838      if (GetMinTrailingZeros(LHS) >=
2839          (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
2840        return getAddExpr(LHS, getSCEV(U->getOperand(1)));
2841    }
2842    break;
2843  case Instruction::Xor:
2844    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2845      // If the RHS of the xor is a signbit, then this is just an add.
2846      // Instcombine turns add of signbit into xor as a strength reduction step.
2847      if (CI->getValue().isSignBit())
2848        return getAddExpr(getSCEV(U->getOperand(0)),
2849                          getSCEV(U->getOperand(1)));
2850
2851      // If the RHS of xor is -1, then this is a not operation.
2852      if (CI->isAllOnesValue())
2853        return getNotSCEV(getSCEV(U->getOperand(0)));
2854
2855      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
2856      // This is a variant of the check for xor with -1, and it handles
2857      // the case where instcombine has trimmed non-demanded bits out
2858      // of an xor with -1.
2859      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
2860        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
2861          if (BO->getOpcode() == Instruction::And &&
2862              LCI->getValue() == CI->getValue())
2863            if (const SCEVZeroExtendExpr *Z =
2864                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
2865              const Type *UTy = U->getType();
2866              const SCEV *Z0 = Z->getOperand();
2867              const Type *Z0Ty = Z0->getType();
2868              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
2869
2870              // If C is a low-bits mask, the zero extend is serving to
2871              // mask off the high bits. Complement the operand and
2872              // re-apply the zext.
2873              if (APIntOps::isMask(Z0TySize, CI->getValue()))
2874                return getZeroExtendExpr(getNotSCEV(Z0), UTy);
2875
2876              // If C is a single bit, it may be in the sign-bit position
2877              // before the zero-extend. In this case, represent the xor
2878              // using an add, which is equivalent, and re-apply the zext.
2879              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
2880              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
2881                  Trunc.isSignBit())
2882                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
2883                                         UTy);
2884            }
2885    }
2886    break;
2887
2888  case Instruction::Shl:
2889    // Turn a shift left by a constant amount into a multiply.
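        // E.g., X << 3 becomes the SCEV expression 8 * X.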
2890    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2891      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2892      Constant *X = ConstantInt::get(
2893        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2894      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2895    }
2896    break;
2897
2898  case Instruction::LShr:
2899    // Turn a logical shift right by a constant amount into an unsigned divide.
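        // E.g., X >>u 3 becomes the SCEV expression X /u 8.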
2900    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2901      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2902      Constant *X = ConstantInt::get(
2903        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2904      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2905    }
2906    break;
2907
2908  case Instruction::AShr:
2909    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
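        // E.g., on i32, (X << 24) >>s 24 becomes sext(trunc X to i8) to i32.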
2910    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2911      if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2912        if (L->getOpcode() == Instruction::Shl &&
2913            L->getOperand(1) == U->getOperand(1)) {
2914          unsigned BitWidth = getTypeSizeInBits(U->getType());
2915          uint64_t Amt = BitWidth - CI->getZExtValue();
2916          if (Amt == BitWidth)
2917            return getSCEV(L->getOperand(0));       // shift by zero --> noop
2918          if (Amt > BitWidth)
2919            return getIntegerSCEV(0, U->getType()); // value is undefined
2920          return
2921            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2922                                                      IntegerType::get(Amt)),
2923                                 U->getType());
2924        }
2925    break;
2926
2927  case Instruction::Trunc:
2928    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2929
2930  case Instruction::ZExt:
2931    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2932
2933  case Instruction::SExt:
2934    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2935
2936  case Instruction::BitCast:
2937    // BitCasts are no-op casts so we just eliminate the cast.
2938    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
2939      return getSCEV(U->getOperand(0));
2940    break;
2941
2942  case Instruction::IntToPtr:
2943    if (!TD) break; // Without TD we can't analyze pointers.
2944    return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2945                                   TD->getIntPtrType());
2946
2947  case Instruction::PtrToInt:
2948    if (!TD) break; // Without TD we can't analyze pointers.
2949    return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2950                                   U->getType());
2951
2952  case Instruction::GetElementPtr:
2953    if (!TD) break; // Without TD we can't analyze pointers.
2954    return createNodeForGEP(U);
2955
2956  case Instruction::PHI:
2957    return createNodeForPHI(cast<PHINode>(U));
2958
2959  case Instruction::Select:
2960    // This could be a smax or umax that was lowered earlier.
2961    // Try to recover it.
2962    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
2963      Value *LHS = ICI->getOperand(0);
2964      Value *RHS = ICI->getOperand(1);
2965      switch (ICI->getPredicate()) {
2966      case ICmpInst::ICMP_SLT:
2967      case ICmpInst::ICMP_SLE:
2968        std::swap(LHS, RHS);
2969        // fall through
2970      case ICmpInst::ICMP_SGT:
2971      case ICmpInst::ICMP_SGE:
2972        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2973          return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
2974        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2975          return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
2976        break;
2977      case ICmpInst::ICMP_ULT:
2978      case ICmpInst::ICMP_ULE:
2979        std::swap(LHS, RHS);
2980        // fall through
2981      case ICmpInst::ICMP_UGT:
2982      case ICmpInst::ICMP_UGE:
2983        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2984          return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
2985        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2986          return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
2987        break;
2988      case ICmpInst::ICMP_NE:
2989        // n != 0 ? n : 1  ->  umax(n, 1)
2990        if (LHS == U->getOperand(1) &&
2991            isa<ConstantInt>(U->getOperand(2)) &&
2992            cast<ConstantInt>(U->getOperand(2))->isOne() &&
2993            isa<ConstantInt>(RHS) &&
2994            cast<ConstantInt>(RHS)->isZero())
2995          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
2996        break;
2997      case ICmpInst::ICMP_EQ:
2998        // n == 0 ? 1 : n  ->  umax(n, 1)
2999        if (LHS == U->getOperand(2) &&
3000            isa<ConstantInt>(U->getOperand(1)) &&
3001            cast<ConstantInt>(U->getOperand(1))->isOne() &&
3002            isa<ConstantInt>(RHS) &&
3003            cast<ConstantInt>(RHS)->isZero())
3004          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
3005        break;
3006      default:
3007        break;
3008      }
3009    }
3010
3011  default: // We cannot analyze this expression.
3012    break;
3013  }
3014
3015  return getUnknown(V);
3016}
3017
3018
3019
3020//===----------------------------------------------------------------------===//
3021//                   Iteration Count Computation Code
3022//
3023
3024/// getBackedgeTakenCount - If the specified loop has a predictable
3025/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3026/// object. The backedge-taken count is the number of times the loop header
3027/// will be branched to from within the loop. This is one less than the
3028/// trip count of the loop, since it doesn't count the first iteration,
3029/// when the header is branched to from outside the loop.
3030///
3031/// Note that it is not valid to call this method on a loop without a
3032/// loop-invariant backedge-taken count (see
3033/// hasLoopInvariantBackedgeTakenCount).
3034///
3035const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3036  return getBackedgeTakenInfo(L).Exact;
3037}
3038
3039/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3040/// return the least SCEV value that is known never to be less than the
3041/// actual backedge taken count.
3042const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3043  return getBackedgeTakenInfo(L).Max;
3044}
3045
3046/// PushLoopPHIs - Push PHI nodes in the header of the given loop
3047/// onto the given Worklist.
3048static void
3049PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3050  BasicBlock *Header = L->getHeader();
3051
3052  // Push all Loop-header PHIs onto the Worklist stack.
3053  for (BasicBlock::iterator I = Header->begin();
3054       PHINode *PN = dyn_cast<PHINode>(I); ++I)
3055    Worklist.push_back(PN);
3056}
3057
3058/// PushDefUseChildren - Push users of the given Instruction
3059/// onto the given Worklist.
3060static void
3061PushDefUseChildren(Instruction *I,
3062                   SmallVectorImpl<Instruction *> &Worklist) {
3063  // Push the def-use children onto the Worklist stack.
3064  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
3065       UI != UE; ++UI)
3066    Worklist.push_back(cast<Instruction>(UI));
3067}
3068
3069const ScalarEvolution::BackedgeTakenInfo &
3070ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3071  // Initially insert a CouldNotCompute for this loop. If the insertion
3072  // succeeds, proceed to actually compute a backedge-taken count and
3073  // update the value. The temporary CouldNotCompute value tells SCEV
3074  // code elsewhere that it shouldn't attempt to request a new
3075  // backedge-taken count, which could result in infinite recursion.
3076  std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
3077    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3078  if (Pair.second) {
3079    BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
3080    if (ItCount.Exact != getCouldNotCompute()) {
3081      assert(ItCount.Exact->isLoopInvariant(L) &&
3082             ItCount.Max->isLoopInvariant(L) &&
3083             "Computed trip count isn't loop invariant for loop!");
3084      ++NumTripCountsComputed;
3085
3086      // Update the value in the map.
3087      Pair.first->second = ItCount;
3088    } else {
3089      if (ItCount.Max != getCouldNotCompute())
3090        // Update the value in the map.
3091        Pair.first->second = ItCount;
3092      if (isa<PHINode>(L->getHeader()->begin()))
3093        // Only count loops that have phi nodes as not being computable.
3094        ++NumTripCountsNotComputed;
3095    }
3096
3097    // Now that we know more about the trip count for this loop, forget any
3098    // existing SCEV values for PHI nodes in this loop since they are only
3099    // conservative estimates made without the benefit of trip count
3100    // information. This is similar to the code in
3101    // forgetLoopBackedgeTakenCount, except that it handles SCEVUnknown PHI
3102    // nodes specially.
3103    if (ItCount.hasAnyInfo()) {
3104      SmallVector<Instruction *, 16> Worklist;
3105      PushLoopPHIs(L, Worklist);
3106
3107      SmallPtrSet<Instruction *, 8> Visited;
3108      while (!Worklist.empty()) {
3109        Instruction *I = Worklist.pop_back_val();
3110        if (!Visited.insert(I)) continue;
3111
3112        std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3113          Scalars.find(static_cast<Value *>(I));
3114        if (It != Scalars.end()) {
3115          // SCEVUnknown for a PHI either means that it has an unrecognized
3116        // structure, or it's a PHI that's in the process of being computed
3117        // by createNodeForPHI.  In the former case, additional loop trip count
3118        // information isn't going to change anything. In the latter case,
3119          // createNodeForPHI will perform the necessary updates on its own when
3120          // it gets to that point.
3121          if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
3122            Scalars.erase(It);
3123          ValuesAtScopes.erase(I);
3124          if (PHINode *PN = dyn_cast<PHINode>(I))
3125            ConstantEvolutionLoopExitValue.erase(PN);
3126        }
3127
3128        PushDefUseChildren(I, Worklist);
3129      }
3130    }
3131  }
3132  return Pair.first->second;
3133}
3134
3135/// forgetLoopBackedgeTakenCount - This method should be called by the
3136/// client when it has changed a loop in a way that may affect
3137/// ScalarEvolution's ability to compute a trip count, or if the loop
3138/// is deleted.
3139void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
3140  BackedgeTakenCounts.erase(L);
3141
3142  SmallVector<Instruction *, 16> Worklist;
3143  PushLoopPHIs(L, Worklist);
3144
3145  SmallPtrSet<Instruction *, 8> Visited;
3146  while (!Worklist.empty()) {
3147    Instruction *I = Worklist.pop_back_val();
3148    if (!Visited.insert(I)) continue;
3149
3150    std::map<SCEVCallbackVH, const SCEV*>::iterator It =
3151      Scalars.find(static_cast<Value *>(I));
3152    if (It != Scalars.end()) {
3153      Scalars.erase(It);
3154      ValuesAtScopes.erase(I);
3155      if (PHINode *PN = dyn_cast<PHINode>(I))
3156        ConstantEvolutionLoopExitValue.erase(PN);
3157    }
3158
3159    PushDefUseChildren(I, Worklist);
3160  }
3161}
3162
3163/// ComputeBackedgeTakenCount - Compute the number of times the backedge
3164/// of the specified loop will execute.
3165ScalarEvolution::BackedgeTakenInfo
3166ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3167  SmallVector<BasicBlock*, 8> ExitingBlocks;
3168  L->getExitingBlocks(ExitingBlocks);
3169
3170  // Examine all exits and pick the most conservative values.
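      // An exact count survives only if every exit has one; the loop then
      // leaves via whichever exit is reached first, i.e. the umin of the
      // per-exit counts.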
3171  const SCEV *BECount = getCouldNotCompute();
3172  const SCEV *MaxBECount = getCouldNotCompute();
3173  bool CouldNotComputeBECount = false;
3174  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3175    BackedgeTakenInfo NewBTI =
3176      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3177
3178    if (NewBTI.Exact == getCouldNotCompute()) {
3179      // We couldn't compute an exact value for this exit, so
3180      // we won't be able to compute an exact value for the loop.
3181      CouldNotComputeBECount = true;
3182      BECount = getCouldNotCompute();
3183    } else if (!CouldNotComputeBECount) {
3184      if (BECount == getCouldNotCompute())
3185        BECount = NewBTI.Exact;
3186      else
3187        BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3188    }
3189    if (MaxBECount == getCouldNotCompute())
3190      MaxBECount = NewBTI.Max;
3191    else if (NewBTI.Max != getCouldNotCompute())
3192      MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3193  }
3194
3195  return BackedgeTakenInfo(BECount, MaxBECount);
3196}
3197
3198/// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3199/// of the specified loop will execute if it exits via the specified block.
3200ScalarEvolution::BackedgeTakenInfo
3201ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3202                                                   BasicBlock *ExitingBlock) {
3203
3204  // Okay, we've chosen an exiting block.  See what condition causes us to
3205  // exit at this block.
3206  //
3207  // FIXME: we should be able to handle switch instructions (with a single exit)
3208  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3209  if (ExitBr == 0) return getCouldNotCompute();
3210  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3211
3212  // At this point, we know we have a conditional branch that determines whether
3213  // the loop is exited.  However, we don't know if the branch is executed each
3214  // time through the loop.  If not, then the execution count of the branch will
3215  // not be equal to the trip count of the loop.
3216  //
3217  // Currently we check for this by checking to see if the Exit branch goes to
3218  // the loop header.  If so, we know it will always execute the same number of
3219  // times as the loop.  We also handle the case where the exit block *is* the
3220  // loop header.  This is common for un-rotated loops.
3221  //
3222  // If both of those tests fail, walk up the unique predecessor chain to the
3223  // header, stopping if there is an edge that doesn't exit the loop. If the
3224  // header is reached, the execution count of the branch will be equal to the
3225  // trip count of the loop.
3226  //
3227  //  More extensive analysis could be done to handle more cases here.
3228  //
3229  if (ExitBr->getSuccessor(0) != L->getHeader() &&
3230      ExitBr->getSuccessor(1) != L->getHeader() &&
3231      ExitBr->getParent() != L->getHeader()) {
3232    // The simple checks failed, try climbing the unique predecessor chain
3233    // up to the header.
3234    bool Ok = false;
3235    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3236      BasicBlock *Pred = BB->getUniquePredecessor();
3237      if (!Pred)
3238        return getCouldNotCompute();
3239      TerminatorInst *PredTerm = Pred->getTerminator();
3240      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3241        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3242        if (PredSucc == BB)
3243          continue;
3244        // If the predecessor has a successor that isn't BB and isn't
3245        // outside the loop, assume the worst.
3246        if (L->contains(PredSucc))
3247          return getCouldNotCompute();
3248      }
3249      if (Pred == L->getHeader()) {
3250        Ok = true;
3251        break;
3252      }
3253      BB = Pred;
3254    }
3255    if (!Ok)
3256      return getCouldNotCompute();
3257  }
3258
3259  // Proceed to the next level to examine the exit condition expression.
3260  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3261                                               ExitBr->getSuccessor(0),
3262                                               ExitBr->getSuccessor(1));
3263}
3264
3265/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3266/// backedge of the specified loop will execute if its exit condition
3267/// were a conditional branch of ExitCond, TBB, and FBB.
3268ScalarEvolution::BackedgeTakenInfo
3269ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3270                                                       Value *ExitCond,
3271                                                       BasicBlock *TBB,
3272                                                       BasicBlock *FBB) {
3273  // Check if the controlling expression for this loop is an And or Or.
3274  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3275    if (BO->getOpcode() == Instruction::And) {
3276      // Recurse on the operands of the and.
3277      BackedgeTakenInfo BTI0 =
3278        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3279      BackedgeTakenInfo BTI1 =
3280        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3281      const SCEV *BECount = getCouldNotCompute();
3282      const SCEV *MaxBECount = getCouldNotCompute();
3283      if (L->contains(TBB)) {
3284        // Both conditions must be true for the loop to continue executing.
3285        // Choose the less conservative count.
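            // (The loop exits as soon as either subcondition fails, so the
            // backedge is taken the smaller number of times.)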
3286        if (BTI0.Exact == getCouldNotCompute() ||
3287            BTI1.Exact == getCouldNotCompute())
3288          BECount = getCouldNotCompute();
3289        else
3290          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3291        if (BTI0.Max == getCouldNotCompute())
3292          MaxBECount = BTI1.Max;
3293        else if (BTI1.Max == getCouldNotCompute())
3294          MaxBECount = BTI0.Max;
3295        else
3296          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3297      } else {
3298        // Both conditions must be true for the loop to exit.
3299        assert(L->contains(FBB) && "Loop block has no successor in loop!");
3300        if (BTI0.Exact != getCouldNotCompute() &&
3301            BTI1.Exact != getCouldNotCompute())
3302          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3303        if (BTI0.Max != getCouldNotCompute() &&
3304            BTI1.Max != getCouldNotCompute())
3305          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3306      }
3307
3308      return BackedgeTakenInfo(BECount, MaxBECount);
3309    }
3310    if (BO->getOpcode() == Instruction::Or) {
3311      // Recurse on the operands of the or.
3312      BackedgeTakenInfo BTI0 =
3313        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3314      BackedgeTakenInfo BTI1 =
3315        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3316      const SCEV *BECount = getCouldNotCompute();
3317      const SCEV *MaxBECount = getCouldNotCompute();
3318      if (L->contains(FBB)) {
3319        // Both conditions must be false for the loop to continue executing.
3320        // Choose the less conservative count.
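            // (The backedge executes only while both subconditions remain
            // false, so again the smaller of the two counts applies.)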
3321        if (BTI0.Exact == getCouldNotCompute() ||
3322            BTI1.Exact == getCouldNotCompute())
3323          BECount = getCouldNotCompute();
3324        else
3325          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3326        if (BTI0.Max == getCouldNotCompute())
3327          MaxBECount = BTI1.Max;
3328        else if (BTI1.Max == getCouldNotCompute())
3329          MaxBECount = BTI0.Max;
3330        else
3331          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3332      } else {
3333        // Both conditions must be false for the loop to exit.
3334        assert(L->contains(TBB) && "Loop block has no successor in loop!");
3335        if (BTI0.Exact != getCouldNotCompute() &&
3336            BTI1.Exact != getCouldNotCompute())
3337          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3338        if (BTI0.Max != getCouldNotCompute() &&
3339            BTI1.Max != getCouldNotCompute())
3340          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3341      }
3342
3343      return BackedgeTakenInfo(BECount, MaxBECount);
3344    }
3345  }
3346
3347  // With an icmp, it may be feasible to compute an exact backedge-taken count.
3348  // Proceed to the next level to examine the icmp.
3349  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3350    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3351
3352  // If it's not an integer or pointer comparison then compute it the hard way.
3353  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3354}
3355
3356/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3357/// backedge of the specified loop will execute if its exit condition
3358/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
3359ScalarEvolution::BackedgeTakenInfo
3360ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3361                                                           ICmpInst *ExitCond,
3362                                                           BasicBlock *TBB,
3363                                                           BasicBlock *FBB) {
3364
3365  // If the condition was exit on true, convert the condition to exit on false
3366  ICmpInst::Predicate Cond;
3367  if (!L->contains(FBB))
3368    Cond = ExitCond->getPredicate();
3369  else
3370    Cond = ExitCond->getInversePredicate();
3371
3372  // Handle common loops like: for (X = "string"; *X; ++X)
3373  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3374    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3375      const SCEV *ItCnt =
3376        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3377      if (!isa<SCEVCouldNotCompute>(ItCnt)) {
3378        unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
3379        return BackedgeTakenInfo(ItCnt,
3380                                 isa<SCEVConstant>(ItCnt) ? ItCnt :
3381                                   getConstant(APInt::getMaxValue(BitWidth)-1));
3382      }
3383    }
3384
3385  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3386  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3387
3388  // Try to evaluate any dependencies out of the loop.
3389  LHS = getSCEVAtScope(LHS, L);
3390  RHS = getSCEVAtScope(RHS, L);
3391
3392  // At this point, we would like to compute how many iterations of the
3393  // loop the predicate will return true for these inputs.
3394  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3395    // If there is a loop-invariant, force it into the RHS.
3396    std::swap(LHS, RHS);
3397    Cond = ICmpInst::getSwappedPredicate(Cond);
3398  }
3399
3400  // If we have a comparison of a chrec against a constant, try to use value
3401  // ranges to answer this query.
3402  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3403    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3404      if (AddRec->getLoop() == L) {
3405        // Form the constant range.
3406        ConstantRange CompRange(
3407            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3408
3409        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3410        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3411      }
3412
3413  switch (Cond) {
3414  case ICmpInst::ICMP_NE: {                     // while (X != Y)
3415    // Convert to: while (X-Y != 0)
3416    const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3417    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3418    break;
3419  }
3420  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
3421    // Convert to: while (X-Y == 0)
3422    const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3423    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3424    break;
3425  }
3426  case ICmpInst::ICMP_SLT: {
3427    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3428    if (BTI.hasAnyInfo()) return BTI;
3429    break;
3430  }
3431  case ICmpInst::ICMP_SGT: {
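        // x >s y is equivalent to ~x <s ~y, so reuse the less-than logic.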
3432    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3433                                             getNotSCEV(RHS), L, true);
3434    if (BTI.hasAnyInfo()) return BTI;
3435    break;
3436  }
3437  case ICmpInst::ICMP_ULT: {
3438    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3439    if (BTI.hasAnyInfo()) return BTI;
3440    break;
3441  }
3442  case ICmpInst::ICMP_UGT: {
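        // Likewise, x >u y is equivalent to ~x <u ~y.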
3443    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3444                                             getNotSCEV(RHS), L, false);
3445    if (BTI.hasAnyInfo()) return BTI;
3446    break;
3447  }
3448  default:
3449#if 0
3450    errs() << "ComputeBackedgeTakenCount ";
3451    if (ExitCond->getOperand(0)->getType()->isUnsigned())
3452      errs() << "[unsigned] ";
3453    errs() << *LHS << "   "
3454         << Instruction::getOpcodeName(Instruction::ICmp)
3455         << "   " << *RHS << "\n";
3456#endif
3457    break;
3458  }
3459  return
3460    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3461}
3462
3463static ConstantInt *
3464EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3465                                ScalarEvolution &SE) {
3466  const SCEV *InVal = SE.getConstant(C);
3467  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
3468  assert(isa<SCEVConstant>(Val) &&
3469         "Evaluation of SCEV at constant didn't fold correctly?");
3470  return cast<SCEVConstant>(Val)->getValue();
3471}
3472
3473/// GetAddressedElementFromGlobal - Given a global variable with an initializer
3474/// and a GEP expression (missing the pointer index) indexing into it, return
3475/// the addressed element of the initializer or null if the index expression is
3476/// invalid.
3477static Constant *
3478GetAddressedElementFromGlobal(GlobalVariable *GV,
3479                              const std::vector<ConstantInt*> &Indices) {
3480  Constant *Init = GV->getInitializer();
3481  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3482    uint64_t Idx = Indices[i]->getZExtValue();
3483    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3484      assert(Idx < CS->getNumOperands() && "Bad struct index!");
3485      Init = cast<Constant>(CS->getOperand(Idx));
3486    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3487      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
3488      Init = cast<Constant>(CA->getOperand(Idx));
3489    } else if (isa<ConstantAggregateZero>(Init)) {
3490      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3491        assert(Idx < STy->getNumElements() && "Bad struct index!");
3492        Init = Constant::getNullValue(STy->getElementType(Idx));
3493      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3494        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
3495        Init = Constant::getNullValue(ATy->getElementType());
3496      } else {
3497        assert(0 && "Unknown constant aggregate type!");
3498      }
3499      // Init is now the zero value of the addressed element; keep indexing.
3500    } else {
3501      return 0; // Unknown initializer type
3502    }
3503  }
3504  return Init;
3505}
3506
3507/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3508/// 'icmp op load X, cst', try to see if we can compute the backedge
3509/// execution count.
3510const SCEV *
3511ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3512                                                LoadInst *LI,
3513                                                Constant *RHS,
3514                                                const Loop *L,
3515                                                ICmpInst::Predicate predicate) {
3516  if (LI->isVolatile()) return getCouldNotCompute();
3517
3518  // Check to see if the loaded pointer is a getelementptr of a global.
3519  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3520  if (!GEP) return getCouldNotCompute();
3521
3522  // Make sure that it is really a constant global we are gepping, with an
3523  // initializer, and make sure the first IDX is really 0.
3524  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3525  if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
3526      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3527      !cast<Constant>(GEP->getOperand(1))->isNullValue())
3528    return getCouldNotCompute();
3529
3530  // Okay, we allow one non-constant index into the GEP instruction.
3531  Value *VarIdx = 0;
3532  std::vector<ConstantInt*> Indexes;
3533  unsigned VarIdxNum = 0;
3534  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3535    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3536      Indexes.push_back(CI);
3537    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
3538      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
3539      VarIdx = GEP->getOperand(i);
3540      VarIdxNum = i-2;
3541      Indexes.push_back(0);
3542    }
3543
3544  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3545  // Check to see if X is a loop-variant value now.
3546  const SCEV *Idx = getSCEV(VarIdx);
3547  Idx = getSCEVAtScope(Idx, L);
3548
3549  // We can only recognize very limited forms of loop index expressions, in
3550  // particular, only affine AddRec's like {C1,+,C2}.
3551  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3552  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3553      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3554      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3555    return getCouldNotCompute();
3556
3557  unsigned MaxSteps = MaxBruteForceIterations;
3558  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3559    ConstantInt *ItCst =
3560      ConstantInt::get(cast<IntegerType>(IdxExpr->getType()), IterationNum);
3561    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3562
3563    // Form the GEP offset.
3564    Indexes[VarIdxNum] = Val;
3565
3566    Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
3567    if (Result == 0) break;  // Cannot compute!
3568
3569    // Evaluate the condition for this iteration.
3570    Result = ConstantExpr::getICmp(predicate, Result, RHS);
3571    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
3572    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3573#if 0
3574      errs() << "\n***\n*** Computed loop count " << *ItCst
3575             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3576             << "***\n";
3577#endif
3578      ++NumArrayLenItCounts;
3579      return getConstant(ItCst);   // Found terminating iteration!
3580    }
3581  }
3582  return getCouldNotCompute();
3583}
3584
3585
3586/// CanConstantFold - Return true if we can constant fold an instruction of the
3587/// specified type, assuming that all operands were constants.
3588static bool CanConstantFold(const Instruction *I) {
3589  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3590      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3591    return true;
3592
3593  if (const CallInst *CI = dyn_cast<CallInst>(I))
3594    if (const Function *F = CI->getCalledFunction())
3595      return canConstantFoldCallTo(F);
3596  return false;
3597}
3598
3599/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3600/// in the loop that V is derived from.  We allow arbitrary operations along the
3601/// way, but the operands of an operation must either be constants or a value
3602/// derived from a constant PHI.  If this expression does not fit with these
3603/// constraints, return null.
3604static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3605  // If this is not an instruction, or if this is an instruction outside of the
3606  // loop, it can't be derived from a loop PHI.
3607  Instruction *I = dyn_cast<Instruction>(V);
3608  if (I == 0 || !L->contains(I->getParent())) return 0;
3609
3610  if (PHINode *PN = dyn_cast<PHINode>(I)) {
3611    if (L->getHeader() == I->getParent())
3612      return PN;
3613    else
3614      // We don't currently keep track of the control flow needed to evaluate
3615      // PHIs, so we cannot handle PHIs inside of loops.
3616      return 0;
3617  }
3618
3619  // If we won't be able to constant fold this expression even if the operands
3620  // are constants, return early.
3621  if (!CanConstantFold(I)) return 0;
3622
3623  // Otherwise, we can evaluate this instruction if all of its operands are
3624  // constant or derived from a PHI node themselves.
3625  PHINode *PHI = 0;
3626  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3627    if (!(isa<Constant>(I->getOperand(Op)) ||
3628          isa<GlobalValue>(I->getOperand(Op)))) {
3629      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3630      if (P == 0) return 0;  // Not evolving from PHI
3631      if (PHI == 0)
3632        PHI = P;
3633      else if (PHI != P)
3634        return 0;  // Evolving from multiple different PHIs.
3635    }
3636
3637  // This is an expression evolving from a constant PHI!
3638  return PHI;
3639}
3640
3641/// EvaluateExpression - Given an expression that passes the
3642/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3643/// in the loop has the value PHIVal.  If we can't fold this expression for some
3644/// reason, return null.
3645static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
3646  if (isa<PHINode>(V)) return PHIVal;
3647  if (Constant *C = dyn_cast<Constant>(V)) return C;
3648  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3649  Instruction *I = cast<Instruction>(V);
3650  LLVMContext *Context = I->getParent()->getContext();
3651
3652  std::vector<Constant*> Operands;
3653  Operands.resize(I->getNumOperands());
3654
3655  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3656    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
3657    if (Operands[i] == 0) return 0;
3658  }
3659
3660  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3661    return ConstantFoldCompareInstOperands(CI->getPredicate(),
3662                                           &Operands[0], Operands.size(),
3663                                           Context);
3664  else
3665    return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3666                                    &Operands[0], Operands.size(),
3667                                    Context);
3668}
3669
3670/// getConstantEvolutionLoopExitValue - If we know that the specified PHI is
3671/// in the header of its containing loop, that the loop executes a
3672/// constant number of times, and that the PHI node is just a recurrence
3673/// involving constants, fold it.
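    /// For example, a PHI that starts at 3 and doubles on each backedge has
    /// the value 3*2^n after n backedges have been taken.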
3674Constant *
3675ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
3676                                                   const APInt& BEs,
3677                                                   const Loop *L) {
3678  std::map<PHINode*, Constant*>::iterator I =
3679    ConstantEvolutionLoopExitValue.find(PN);
3680  if (I != ConstantEvolutionLoopExitValue.end())
3681    return I->second;
3682
3683  if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
3684    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
3685
3686  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
3687
3688  // Since the loop is canonicalized, the PHI node must have two entries.  One
3689  // entry must be a constant (coming in from outside of the loop), and the
3690  // second must be derived from the same PHI.
3691  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3692  Constant *StartCST =
3693    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3694  if (StartCST == 0)
3695    return RetVal = 0;  // Must be a constant.
3696
3697  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3698  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3699  if (PN2 != PN)
3700    return RetVal = 0;  // Not derived from same PHI.
3701
3702  // Execute the loop symbolically to determine the exit value.
3703  if (BEs.getActiveBits() >= 32)
3704    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
3705
3706  unsigned NumIterations = BEs.getZExtValue(); // must be in range
3707  unsigned IterationNum = 0;
3708  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
3709    if (IterationNum == NumIterations)
3710      return RetVal = PHIVal;  // Got exit value!
3711
3712    // Compute the value of the PHI node for the next iteration.
3713    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3714    if (NextPHI == PHIVal)
3715      return RetVal = NextPHI;  // Stopped evolving!
3716    if (NextPHI == 0)
3717      return 0;        // Couldn't evaluate!
3718    PHIVal = NextPHI;
3719  }
3720}
3721
3722/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
3723/// constant number of times (the condition evolves only from constants),
3724/// try to evaluate a few iterations of the loop until the exit condition
3725/// gets a value of ExitWhen (true or false).  If we cannot
3726/// evaluate the trip count of the loop, return getCouldNotCompute().
3727const SCEV *
3728ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
3729                                                       Value *Cond,
3730                                                       bool ExitWhen) {
3731  PHINode *PN = getConstantEvolvingPHI(Cond, L);
3732  if (PN == 0) return getCouldNotCompute();
3733
3734  // Since the loop is canonicalized, the PHI node must have two entries.  One
3735  // entry must be a constant (coming in from outside of the loop), and the
3736  // second must be derived from the same PHI.
3737  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
3738  Constant *StartCST =
3739    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
3740  if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
3741
3742  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
3743  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
3744  if (PN2 != PN) return getCouldNotCompute();  // Not derived from same PHI.
3745
3746  // Okay, we found a PHI node that defines the trip count of this loop.  Execute
3747  // the loop symbolically to determine when the condition gets a value of
3748  // "ExitWhen".
3749  unsigned IterationNum = 0;
3750  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
3751  for (Constant *PHIVal = StartCST;
3752       IterationNum != MaxIterations; ++IterationNum) {
3753    ConstantInt *CondVal =
3754      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
3755
3756    // Couldn't symbolically evaluate.
3757    if (!CondVal) return getCouldNotCompute();
3758
3759    if (CondVal->getValue() == uint64_t(ExitWhen)) {
3760      ++NumBruteForceTripCountsComputed;
3761      return getConstant(Type::Int32Ty, IterationNum);
3762    }
3763
3764    // Compute the value of the PHI node for the next iteration.
3765    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
3766    if (NextPHI == 0 || NextPHI == PHIVal)
3767      return getCouldNotCompute();  // Couldn't evaluate or not making progress.
3768    PHIVal = NextPHI;
3769  }
3770
3771  // Too many iterations were needed to evaluate.
3772  return getCouldNotCompute();
3773}
3774
3775/// getSCEVAtScope - Return a SCEV expression handle for the specified value
3776/// at the specified scope in the program.  The L value specifies a loop
3777/// nest to evaluate the expression at, where null is the top-level or a
3778/// specified loop is immediately inside of the loop.
3779///
3780/// This method can be used to compute the exit value for a variable defined
3781/// in a loop by querying what the value will hold in the parent loop.
3782///
3783/// In the case that a relevant loop exit value cannot be computed, the
3784/// original value V is returned.
3785const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
3786  // FIXME: this should be turned into a virtual method on SCEV!
3787
3788  if (isa<SCEVConstant>(V)) return V;
3789
3790  // If this instruction is evolved from a constant-evolving PHI, compute the
3791  // exit value from the loop without using SCEVs.
3792  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
3793    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
3794      const Loop *LI = (*this->LI)[I->getParent()];
3795      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
3796        if (PHINode *PN = dyn_cast<PHINode>(I))
3797          if (PN->getParent() == LI->getHeader()) {
3798            // Okay, there is no closed form solution for the PHI node.  Check
3799            // to see if the loop that contains it has a known backedge-taken
3800            // count.  If so, we may be able to force computation of the exit
3801            // value.
3802            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
3803            if (const SCEVConstant *BTCC =
3804                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
3805              // Okay, we know how many times the containing loop executes.  If
3806              // this is a constant evolving PHI node, get the final value at
3807              // the specified iteration number.
3808              Constant *RV = getConstantEvolutionLoopExitValue(PN,
3809                                                   BTCC->getValue()->getValue(),
3810                                                               LI);
3811              if (RV) return getSCEV(RV);
3812            }
3813          }
3814
3815      // Okay, this is an expression that we cannot symbolically evaluate
3816      // into a SCEV.  Check to see if it's possible to symbolically evaluate
3817      // the arguments into constants, and if so, try to constant propagate the
3818      // result.  This is particularly useful for computing loop exit values.
3819      if (CanConstantFold(I)) {
3820        // Check to see if we've folded this instruction at this loop before.
3821        std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
3822        std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
3823          Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
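        // If the entry already existed, reuse the cached fold: a non-null
        // Constant is the previously folded value, while null records an
        // earlier failure (and stops infinite recursion if evaluating the
        // operands leads back to this instruction).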
3824        if (!Pair.second)
3825          return Pair.first->second ? getSCEV(Pair.first->second) : V;
3826
3827        std::vector<Constant*> Operands;
3828        Operands.reserve(I->getNumOperands());
3829        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3830          Value *Op = I->getOperand(i);
3831          if (Constant *C = dyn_cast<Constant>(Op)) {
3832            Operands.push_back(C);
3833          } else {
3834            // If any operand is non-constant and its type is neither
3835            // integer nor pointer, don't even try to analyze it with
3836            // SCEV techniques.
3837            if (!isSCEVable(Op->getType()))
3838              return V;
3839
3840            const SCEV *OpV = getSCEVAtScope(Op, L);
3841            if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
3842              Constant *C = SC->getValue();
3843              if (C->getType() != Op->getType())
3844                C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3845                                                                  Op->getType(),
3846                                                                  false),
3847                                          C, Op->getType());
3848              Operands.push_back(C);
3849            } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
3850              if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
3851                if (C->getType() != Op->getType())
3852                  C =
3853                    ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
3854                                                                  Op->getType(),
3855                                                                  false),
3856                                          C, Op->getType());
3857                Operands.push_back(C);
3858              } else
3859                return V;
3860            } else {
3861              return V;
3862            }
3863          }
3864        }
3865
3866        Constant *C;
3867        if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3868          C = ConstantFoldCompareInstOperands(CI->getPredicate(),
3869                                              &Operands[0], Operands.size(),
3870                                              Context);
3871        else
3872          C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3873                                       &Operands[0], Operands.size(), Context);
3874        Pair.first->second = C;
3875        return getSCEV(C);
3876      }
3877    }
3878
3879    // This is some other type of SCEVUnknown, just return it.
3880    return V;
3881  }
3882
3883  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
3884    // Avoid performing the look-up in the common case where the specified
3885    // expression has no loop-variant portions.
3886    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
3887      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3888      if (OpAtScope != Comm->getOperand(i)) {
3889        // Okay, at least one of these operands is loop variant but might be
3890        // foldable.  Build a new instance of the folded commutative expression.
3891        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
3892                                            Comm->op_begin()+i);
3893        NewOps.push_back(OpAtScope);
3894
3895        for (++i; i != e; ++i) {
3896          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
3897          NewOps.push_back(OpAtScope);
3898        }
3899        if (isa<SCEVAddExpr>(Comm))
3900          return getAddExpr(NewOps);
3901        if (isa<SCEVMulExpr>(Comm))
3902          return getMulExpr(NewOps);
3903        if (isa<SCEVSMaxExpr>(Comm))
3904          return getSMaxExpr(NewOps);
3905        if (isa<SCEVUMaxExpr>(Comm))
3906          return getUMaxExpr(NewOps);
3907        assert(0 && "Unknown commutative SCEV type!");
3908      }
3909    }
3910    // If we got here, all operands are loop invariant.
3911    return Comm;
3912  }
3913
3914  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
3915    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
3916    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
3917    if (LHS == Div->getLHS() && RHS == Div->getRHS())
3918      return Div;   // must be loop invariant
3919    return getUDivExpr(LHS, RHS);
3920  }
3921
3922  // If this is a loop recurrence for a loop that does not contain L, then we
3923  // are dealing with the final value computed by the loop.
3924  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
3925    if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
3926      // To evaluate this recurrence, we need to know how many times the AddRec
3927      // loop iterates.  Compute this now.
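      // (For example, the final value of {0,+,2}<L> in a loop whose backedge
      // is taken 5 times is 0 + 2*5 == 10 at any scope outside L.)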
3928      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
3929      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
3930
3931      // Then, evaluate the AddRec.
3932      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
3933    }
3934    return AddRec;
3935  }
3936
3937  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
3938    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3939    if (Op == Cast->getOperand())
3940      return Cast;  // must be loop invariant
3941    return getZeroExtendExpr(Op, Cast->getType());
3942  }
3943
3944  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
3945    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3946    if (Op == Cast->getOperand())
3947      return Cast;  // must be loop invariant
3948    return getSignExtendExpr(Op, Cast->getType());
3949  }
3950
3951  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
3952    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
3953    if (Op == Cast->getOperand())
3954      return Cast;  // must be loop invariant
3955    return getTruncateExpr(Op, Cast->getType());
3956  }
3957
3958  assert(0 && "Unknown SCEV type!");
3959  return 0;
3960}
3961
3962/// getSCEVAtScope - This is a convenience function which does
3963/// getSCEVAtScope(getSCEV(V), L).
3964const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
3965  return getSCEVAtScope(getSCEV(V), L);
3966}
3967
3968/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
3969/// following equation:
3970///
3971///     A * X = B (mod N)
3972///
3973/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
3974/// A and B isn't important.
3975///
3976/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
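///
/// For example, with BW = 8, solving 4 * X = 12 (mod 256) gives Mult2 = 2
/// (so D = 4), Mod = N / D = 64, I = 1, and minimum root X = 3; the other
/// roots 67, 131, and 195 are all larger.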
3977static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
3978                                               ScalarEvolution &SE) {
3979  uint32_t BW = A.getBitWidth();
3980  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
3981  assert(A != 0 && "A must be non-zero.");
3982
3983  // 1. D = gcd(A, N)
3984  //
3985  // The gcd of A and N can have only one prime factor: 2. The number of
3986  // trailing zeros in A is its multiplicity.
3987  uint32_t Mult2 = A.countTrailingZeros();
3988  // D = 2^Mult2
3989
3990  // 2. Check if B is divisible by D.
3991  //
3992  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
3993  // is not less than the multiplicity of this prime factor for D.
3994  if (B.countTrailingZeros() < Mult2)
3995    return SE.getCouldNotCompute();
3996
3997  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
3998  // modulo (N / D).
3999  //
4000  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
4001  // bit width during computations.
4002  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
4003  APInt Mod(BW + 1, 0);
4004  Mod.set(BW - Mult2);  // Mod = N / D
4005  APInt I = AD.multiplicativeInverse(Mod);
4006
4007  // 4. Compute the minimum unsigned root of the equation:
4008  // I * (B / D) mod (N / D)
4009  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
4010
4011  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
4012  // bits.
4013  return SE.getConstant(Result.trunc(BW));
4014}
4015
4016/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
4017/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
4018/// might be the same) or two SCEVCouldNotCompute objects.
4019///
4020static std::pair<const SCEV *,const SCEV *>
4021SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
4022  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
4023  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
4024  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
4025  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
4026
4027  // We currently can only solve this if the coefficients are constants.
4028  if (!LC || !MC || !NC) {
4029    const SCEV *CNC = SE.getCouldNotCompute();
4030    return std::make_pair(CNC, CNC);
4031  }
4032
4033  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
4034  const APInt &L = LC->getValue()->getValue();
4035  const APInt &M = MC->getValue()->getValue();
4036  const APInt &N = NC->getValue()->getValue();
4037  APInt Two(BitWidth, 2);
4038  APInt Four(BitWidth, 4);
4039
4040  {
4041    using namespace APIntOps;
4042    const APInt& C = L;
4043    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
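    // (A chrec {L,+,M,+,N} evaluated at iteration i is
    //    L + M*i + N*(i*(i-1)/2) = (N/2)*i^2 + (M - N/2)*i + L,
    // which is where the coefficients below come from.)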
4044    // The B coefficient is M-N/2
4045    APInt B(M);
4046    B -= sdiv(N,Two);
4047
4048    // The A coefficient is N/2
4049    APInt A(N.sdiv(Two));
4050
4051    // Compute the B^2-4ac term.
4052    APInt SqrtTerm(B);
4053    SqrtTerm *= B;
4054    SqrtTerm -= Four * (A * C);
4055
4056    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4057    // integer value or else APInt::sqrt() will assert.
4058    APInt SqrtVal(SqrtTerm.sqrt());
4059
4060    // Compute the two solutions for the quadratic formula.
4061    // The divisions must be performed as signed divisions.
4062    APInt NegB(-B);
4063    APInt TwoA(A << 1);
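    // If 2A is zero (N sdiv 2 was zero, or the shift wrapped), the equation
    // is not actually quadratic and the signed divisions below would divide
    // by zero.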
4064    if (TwoA.isMinValue()) {
4065      const SCEV *CNC = SE.getCouldNotCompute();
4066      return std::make_pair(CNC, CNC);
4067    }
4068
4069    LLVMContext *Context = SE.getContext();
4070
4071    ConstantInt *Solution1 =
4072      Context->getConstantInt((NegB + SqrtVal).sdiv(TwoA));
4073    ConstantInt *Solution2 =
4074      Context->getConstantInt((NegB - SqrtVal).sdiv(TwoA));
4075
4076    return std::make_pair(SE.getConstant(Solution1),
4077                          SE.getConstant(Solution2));
4078  }  // end local scope using APIntOps
4079}
4080
4081/// HowFarToZero - Return the number of times a backedge comparing the specified
4082/// value to zero will execute.  If not computable, return CouldNotCompute.
4083const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
4084  // If the value is a constant
4085  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4086    // If the value is already zero, the branch will execute zero times.
4087    if (C->getValue()->isZero()) return C;
4088    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4089  }
4090
4091  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4092  if (!AddRec || AddRec->getLoop() != L)
4093    return getCouldNotCompute();
4094
4095  if (AddRec->isAffine()) {
4096    // If this is an affine expression, the execution count of this branch is
4097    // the minimum unsigned root of the following equation:
4098    //
4099    //     Start + Step*N = 0 (mod 2^BW)
4100    //
4101    // equivalent to:
4102    //
4103    //             Step*N = -Start (mod 2^BW)
4104    //
4105    // where BW is the common bit width of Start and Step.
4106
4107    // Get the initial value for the loop.
4108    const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4109                                       L->getParentLoop());
4110    const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4111                                      L->getParentLoop());
4112
4113    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4114      // For now we handle only constant steps.
4115
4116      // First, handle unitary steps.
4117      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
4118        return getNegativeSCEV(Start);       //   N = -Start (as unsigned)
4119      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
4120        return Start;                           //    N = Start (as unsigned)
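      // For example, {3,+,1} first reaches zero after -3 (as unsigned)
      // iterations, while {3,+,-1} reaches zero after exactly 3 iterations.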
4121
4122      // Then, try to solve the above equation provided that Start is constant.
4123      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4124        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4125                                            -StartC->getValue()->getValue(),
4126                                            *this);
4127    }
4128  } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
4129    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4130    // the quadratic equation to solve it.
4131    std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
4132                                                                    *this);
4133    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4134    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4135    if (R1) {
4136#if 0
4137      errs() << "HFTZ: " << *V << " - sol#1: " << *R1
4138             << "  sol#2: " << *R2 << "\n";
4139#endif
4140      // Pick the smallest positive root value.
4141      if (ConstantInt *CB =
4142          dyn_cast<ConstantInt>(Context->getConstantExprICmp(ICmpInst::ICMP_ULT,
4143                                   R1->getValue(), R2->getValue()))) {
4144        if (!CB->getZExtValue())
4145          std::swap(R1, R2);   // R1 is the minimum root now.
4146
4147        // We can only use this value if the chrec ends up with an exact zero
4148        // value at this index.  When solving for "X*X != 5", for example, we
4149        // should not accept a root of 2.
4150        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4151        if (Val->isZero())
4152          return R1;  // We found a quadratic root!
4153      }
4154    }
4155  }
4156
4157  return getCouldNotCompute();
4158}
4159
4160/// HowFarToNonZero - Return the number of times a backedge checking the
4161/// specified value for nonzero will execute.  If not computable, return
4162/// CouldNotCompute.
4163const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4164  // Loops that look like: while (X == 0) are very strange indeed.  We don't
4165  // handle them yet except for the trivial case.  This could be expanded in the
4166  // future as needed.
4167
4168  // If the value is a constant, check to see if it is known to be non-zero
4169  // already.  If so, the backedge will execute zero times.
4170  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4171    if (!C->getValue()->isNullValue())
4172      return getIntegerSCEV(0, C->getType());
4173    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4174  }
4175
4176  // We could implement others, but I really doubt anyone writes loops like
4177  // this, and if they did, they would already be constant folded.
4178  return getCouldNotCompute();
4179}
4180
4181/// getLoopPredecessor - If the given loop's header has exactly one unique
4182/// predecessor outside the loop, return it. Otherwise return null.
4183///
4184BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
4185  BasicBlock *Header = L->getHeader();
4186  BasicBlock *Pred = 0;
4187  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
4188       PI != E; ++PI)
4189    if (!L->contains(*PI)) {
4190      if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4191      Pred = *PI;
4192    }
4193  return Pred;
4194}
4195
4196/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4197/// (which may not be an immediate predecessor) which has exactly one
4198/// successor from which BB is reachable, or null if no such block is
4199/// found.
4200///
4201BasicBlock *
4202ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4203  // If the block has a unique predecessor, then there is no path from the
4204  // predecessor to the block that does not go through the direct edge
4205  // from the predecessor to the block.
4206  if (BasicBlock *Pred = BB->getSinglePredecessor())
4207    return Pred;
4208
4209  // A loop's header is defined to be a block that dominates the loop.
4210  // If the header has a unique predecessor outside the loop, it must be
4211  // a block that has exactly one successor that can reach the loop.
4212  if (Loop *L = LI->getLoopFor(BB))
4213    return getLoopPredecessor(L);
4214
4215  return 0;
4216}
4217
4218/// HasSameValue - SCEV structural equivalence is usually sufficient for
4219/// testing whether two expressions are equal; however, for the purposes of
4220/// looking for a condition guarding a loop, it can be useful to be a little
4221/// more general, since a front-end may have replicated the controlling
4222/// expression.
4223///
4224static bool HasSameValue(const SCEV *A, const SCEV *B) {
4225  // Quick check to see if they are the same SCEV.
4226  if (A == B) return true;
4227
4228  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4229  // two different instructions with the same value. Check for this case.
4230  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4231    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4232      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4233        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4234          if (AI->isIdenticalTo(BI))
4235            return true;
4236
4237  // Otherwise assume they may have a different value.
4238  return false;
4239}
4240
4241bool ScalarEvolution::isKnownNegative(const SCEV *S) {
4242  return getSignedRange(S).getSignedMax().isNegative();
4243}
4244
4245bool ScalarEvolution::isKnownPositive(const SCEV *S) {
4246  return getSignedRange(S).getSignedMin().isStrictlyPositive();
4247}
4248
4249bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
4250  return !getSignedRange(S).getSignedMin().isNegative();
4251}
4252
4253bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
4254  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
4255}
4256
4257bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
4258  return isKnownNegative(S) || isKnownPositive(S);
4259}
4260
4261bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
4262                                       const SCEV *LHS, const SCEV *RHS) {
4263
4264  if (HasSameValue(LHS, RHS))
4265    return ICmpInst::isTrueWhenEqual(Pred);
4266
4267  switch (Pred) {
4268  default:
4269    assert(0 && "Unexpected ICmpInst::Predicate value!");
4270    break;
4271  case ICmpInst::ICMP_SGT:
4272    Pred = ICmpInst::ICMP_SLT;
4273    std::swap(LHS, RHS);
4274  case ICmpInst::ICMP_SLT: {
4275    ConstantRange LHSRange = getSignedRange(LHS);
4276    ConstantRange RHSRange = getSignedRange(RHS);
4277    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
4278      return true;
4279    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
4280      return false;
4281
4282    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4283    ConstantRange DiffRange = getUnsignedRange(Diff);
4284    if (isKnownNegative(Diff)) {
4285      if (DiffRange.getUnsignedMax().ult(LHSRange.getUnsignedMin()))
4286        return true;
4287      if (DiffRange.getUnsignedMin().uge(LHSRange.getUnsignedMax()))
4288        return false;
4289    } else if (isKnownPositive(Diff)) {
4290      if (LHSRange.getUnsignedMax().ult(DiffRange.getUnsignedMin()))
4291        return true;
4292      if (LHSRange.getUnsignedMin().uge(DiffRange.getUnsignedMax()))
4293        return false;
4294    }
4295    break;
4296  }
4297  case ICmpInst::ICMP_SGE:
4298    Pred = ICmpInst::ICMP_SLE;
4299    std::swap(LHS, RHS);
4300  case ICmpInst::ICMP_SLE: {
4301    ConstantRange LHSRange = getSignedRange(LHS);
4302    ConstantRange RHSRange = getSignedRange(RHS);
4303    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
4304      return true;
4305    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
4306      return false;
4307
4308    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4309    ConstantRange DiffRange = getUnsignedRange(Diff);
4310    if (isKnownNonPositive(Diff)) {
4311      if (DiffRange.getUnsignedMax().ule(LHSRange.getUnsignedMin()))
4312        return true;
4313      if (DiffRange.getUnsignedMin().ugt(LHSRange.getUnsignedMax()))
4314        return false;
4315    } else if (isKnownNonNegative(Diff)) {
4316      if (LHSRange.getUnsignedMax().ule(DiffRange.getUnsignedMin()))
4317        return true;
4318      if (LHSRange.getUnsignedMin().ugt(DiffRange.getUnsignedMax()))
4319        return false;
4320    }
4321    break;
4322  }
4323  case ICmpInst::ICMP_UGT:
4324    Pred = ICmpInst::ICMP_ULT;
4325    std::swap(LHS, RHS);
4326  case ICmpInst::ICMP_ULT: {
4327    ConstantRange LHSRange = getUnsignedRange(LHS);
4328    ConstantRange RHSRange = getUnsignedRange(RHS);
4329    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
4330      return true;
4331    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
4332      return false;
4333
4334    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4335    ConstantRange DiffRange = getUnsignedRange(Diff);
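    // Since Diff = LHS - RHS (mod 2^BW), the subtraction wraps exactly when
    // LHS <u RHS, making Diff = LHS + (2^BW - RHS) >u LHS; otherwise
    // Diff = LHS - RHS <=u LHS.  So LHS <u RHS holds iff Diff >u LHS, which
    // the two range checks below decide when the ranges don't overlap.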
4336    if (LHSRange.getUnsignedMax().ult(DiffRange.getUnsignedMin()))
4337      return true;
4338    if (LHSRange.getUnsignedMin().uge(DiffRange.getUnsignedMax()))
4339      return false;
4340    break;
4341  }
4342  case ICmpInst::ICMP_UGE:
4343    Pred = ICmpInst::ICMP_ULE;
4344    std::swap(LHS, RHS);
4345  case ICmpInst::ICMP_ULE: {
4346    ConstantRange LHSRange = getUnsignedRange(LHS);
4347    ConstantRange RHSRange = getUnsignedRange(RHS);
4348    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
4349      return true;
4350    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
4351      return false;
4352
4353    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4354    ConstantRange DiffRange = getUnsignedRange(Diff);
4355    if (LHSRange.getUnsignedMax().ule(DiffRange.getUnsignedMin()))
4356      return true;
4357    if (LHSRange.getUnsignedMin().ugt(DiffRange.getUnsignedMax()))
4358      return false;
4359    break;
4360  }
4361  case ICmpInst::ICMP_NE: {
4362    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
4363      return true;
4364    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
4365      return true;
4366
4367    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4368    if (isKnownNonZero(Diff))
4369      return true;
4370    break;
4371  }
4372  case ICmpInst::ICMP_EQ:
4373    break;
4374  }
4375  return false;
4376}
4377
4378/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
4379/// protected by a conditional between LHS and RHS.  This is used to
4380/// eliminate casts.
4381bool
4382ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
4383                                             ICmpInst::Predicate Pred,
4384                                             const SCEV *LHS, const SCEV *RHS) {
4385  // Interpret a null as meaning no loop, where there is obviously no guard
4386  // (interprocedural conditions notwithstanding).
4387  if (!L) return true;
4388
4389  BasicBlock *Latch = L->getLoopLatch();
4390  if (!Latch)
4391    return false;
4392
4393  BranchInst *LoopContinuePredicate =
4394    dyn_cast<BranchInst>(Latch->getTerminator());
4395  if (!LoopContinuePredicate ||
4396      LoopContinuePredicate->isUnconditional())
4397    return false;
4398
4399  return
4400    isNecessaryCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
4401                    LoopContinuePredicate->getSuccessor(0) != L->getHeader());
4402}
4403
4404/// isLoopGuardedByCond - Test whether entry to the loop is protected
4405/// by a conditional between LHS and RHS.  This is used to help avoid max
4406/// expressions in loop trip counts, and to eliminate casts.
4407bool
4408ScalarEvolution::isLoopGuardedByCond(const Loop *L,
4409                                     ICmpInst::Predicate Pred,
4410                                     const SCEV *LHS, const SCEV *RHS) {
4411  // Interpret a null as meaning no loop, where there is obviously no guard
4412  // (interprocedural conditions notwithstanding).
4413  if (!L) return false;
4414
4415  BasicBlock *Predecessor = getLoopPredecessor(L);
4416  BasicBlock *PredecessorDest = L->getHeader();
4417
4418  // Starting at the loop predecessor, climb up the predecessor chain, as long
4419  // as there are predecessors that can be found that have unique successors
4420  // leading to the original header.
4421  for (; Predecessor;
4422       PredecessorDest = Predecessor,
4423       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
4424
4425    BranchInst *LoopEntryPredicate =
4426      dyn_cast<BranchInst>(Predecessor->getTerminator());
4427    if (!LoopEntryPredicate ||
4428        LoopEntryPredicate->isUnconditional())
4429      continue;
4430
4431    if (isNecessaryCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
4432                        LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
4433      return true;
4434  }
4435
4436  return false;
4437}
4438
4439/// isNecessaryCond - Test whether the condition described by Pred, LHS,
4440/// and RHS is a necessary condition for the given Cond value to evaluate
4441/// to true.
4442bool ScalarEvolution::isNecessaryCond(Value *CondValue,
4443                                      ICmpInst::Predicate Pred,
4444                                      const SCEV *LHS, const SCEV *RHS,
4445                                      bool Inverse) {
4446  // Recursively handle And and Or conditions.
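  // If a condition (A && B) is true then both A and B are true, so anything
  // necessary for one operand alone is necessary for the whole; dually, when
  // Inverse is set, (A || B) being false makes both operands false.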
4447  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
4448    if (BO->getOpcode() == Instruction::And) {
4449      if (!Inverse)
4450        return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4451               isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4452    } else if (BO->getOpcode() == Instruction::Or) {
4453      if (Inverse)
4454        return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4455               isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4456    }
4457  }
4458
4459  ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
4460  if (!ICI) return false;
4461
4462  // Now that we've found a conditional branch that dominates the loop, check to
4463  // see if it is the comparison we are looking for.
4464  Value *PreCondLHS = ICI->getOperand(0);
4465  Value *PreCondRHS = ICI->getOperand(1);
4466  ICmpInst::Predicate FoundPred;
4467  if (Inverse)
4468    FoundPred = ICI->getInversePredicate();
4469  else
4470    FoundPred = ICI->getPredicate();
4471
4472  if (FoundPred == Pred)
4473    ; // An exact match.
4474  else if (!ICmpInst::isTrueWhenEqual(FoundPred) && Pred == ICmpInst::ICMP_NE) {
4475    // The actual condition is stronger than the one we need.
4476    FoundPred = ICmpInst::ICMP_NE;
4477    // NE is symmetric but the original comparison may not be. Swap
4478    // the operands if necessary so that they match below.
4479    if (isa<SCEVConstant>(LHS))
4480      std::swap(PreCondLHS, PreCondRHS);
4481  } else
4482    // Check a few special cases.
4483    switch (FoundPred) {
4484    case ICmpInst::ICMP_UGT:
4485      if (Pred == ICmpInst::ICMP_ULT) {
4486        std::swap(PreCondLHS, PreCondRHS);
4487        FoundPred = ICmpInst::ICMP_ULT;
4488        break;
4489      }
4490      return false;
4491    case ICmpInst::ICMP_SGT:
4492      if (Pred == ICmpInst::ICMP_SLT) {
4493        std::swap(PreCondLHS, PreCondRHS);
4494        FoundPred = ICmpInst::ICMP_SLT;
4495        break;
4496      }
4497      return false;
4498    case ICmpInst::ICMP_NE:
4499      // Expressions like (x >u 0) are often canonicalized to (x != 0),
4500      // so check for this case by checking if the NE is comparing against
4501      // a minimum or maximum constant.
4502      if (!ICmpInst::isTrueWhenEqual(Pred))
4503        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(RHS)) {
4504          const APInt &A = C->getValue()->getValue();
4505          switch (Pred) {
4506          case ICmpInst::ICMP_SLT:
4507            if (A.isMaxSignedValue()) break;
4508            return false;
4509          case ICmpInst::ICMP_SGT:
4510            if (A.isMinSignedValue()) break;
4511            return false;
4512          case ICmpInst::ICMP_ULT:
4513            if (A.isMaxValue()) break;
4514            return false;
4515          case ICmpInst::ICMP_UGT:
4516            if (A.isMinValue()) break;
4517            return false;
4518          default:
4519            return false;
4520          }
4521          FoundPred = Pred;
4522          // NE is symmetric but the original comparison may not be. Swap
4523          // the operands if necessary so that they match below.
4524          if (isa<SCEVConstant>(LHS))
4525            std::swap(PreCondLHS, PreCondRHS);
4526          break;
4527        }
4528      return false;
4529    default:
4530      // We weren't able to reconcile the condition.
4531      return false;
4532    }
4533
4534  assert(Pred == FoundPred && "Conditions were not reconciled!");
4535
4536  const SCEV *FoundLHS = getSCEV(PreCondLHS);
4537  const SCEV *FoundRHS = getSCEV(PreCondRHS);
4538
4539  // Balance the types.
4540  if (getTypeSizeInBits(LHS->getType()) >
4541      getTypeSizeInBits(FoundLHS->getType())) {
4542    if (CmpInst::isSigned(Pred)) {
4543      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
4544      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
4545    } else {
4546      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
4547      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
4548    }
4549  } else if (getTypeSizeInBits(LHS->getType()) <
4550             getTypeSizeInBits(FoundLHS->getType())) {
4551    // TODO: Cast LHS and RHS to FoundLHS' type. Currently this can
4552    // result in infinite recursion since the code to construct
4553    // cast expressions may want to know things about the loop
4554    // iteration in order to do simplifications.
4555    return false;
4556  }
4557
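  // Since ~X == -1 - X, mapping X --> ~X reverses both the signed and the
  // unsigned orderings, so FoundLHS <pred> FoundRHS holds exactly when
  // ~FoundRHS <pred> ~FoundLHS does; try the operands in that form as well.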
4558  return isNecessaryCondOperands(Pred, LHS, RHS,
4559                                 FoundLHS, FoundRHS) ||
4560         // ~x < ~y --> x > y
4561         isNecessaryCondOperands(Pred, LHS, RHS,
4562                                 getNotSCEV(FoundRHS), getNotSCEV(FoundLHS));
4563}
4564
4565/// isNecessaryCondOperands - Test whether the condition described by Pred,
4566/// LHS, and RHS is a necessary condition for the condition described by
4567/// Pred, FoundLHS, and FoundRHS to evaluate to true.
4568bool
4569ScalarEvolution::isNecessaryCondOperands(ICmpInst::Predicate Pred,
4570                                         const SCEV *LHS, const SCEV *RHS,
4571                                         const SCEV *FoundLHS,
4572                                         const SCEV *FoundRHS) {
4573  switch (Pred) {
4574  default: break;
4575  case ICmpInst::ICMP_SLT:
4576    if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
4577        isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
4578      return true;
4579    break;
4580  case ICmpInst::ICMP_SGT:
4581    if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
4582        isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
4583      return true;
4584    break;
4585  case ICmpInst::ICMP_ULT:
4586    if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
4587        isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
4588      return true;
4589    break;
4590  case ICmpInst::ICMP_UGT:
4591    if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
4592        isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
4593      return true;
4594    break;
4595  }
4596
4597  return false;
4598}
4599
4600/// getBECount - Subtract the end and start values and divide by the step,
4601/// rounding up, to get the number of times the backedge is executed. Return
4602/// CouldNotCompute if an intermediate computation overflows.
4603const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
4604                                       const SCEV *End,
4605                                       const SCEV *Step) {
4606  const Type *Ty = Start->getType();
4607  const SCEV *NegOne = getIntegerSCEV(-1, Ty);
4608  const SCEV *Diff = getMinusSCEV(End, Start);
4609  const SCEV *RoundUp = getAddExpr(Step, NegOne);
4610
4611  // Add an adjustment to the difference between End and Start so that
4612  // the division will effectively round up.
4613  const SCEV *Add = getAddExpr(Diff, RoundUp);
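  // For example, with Start = 0, End = 10, and Step = 3, this computes
  // (10 - 0 + (3 - 1)) /u 3 == 4, i.e. 10/3 rounded up.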
4614
4615  // Check Add for unsigned overflow.
4616  // TODO: More sophisticated things could be done here.
4617  const Type *WideTy = Context->getIntegerType(getTypeSizeInBits(Ty) + 1);
4618  const SCEV *OperandExtendedAdd =
4619    getAddExpr(getZeroExtendExpr(Diff, WideTy),
4620               getZeroExtendExpr(RoundUp, WideTy));
4621  if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
4622    return getCouldNotCompute();
4623
4624  return getUDivExpr(Add, Step);
4625}
4626
4627/// HowManyLessThans - Return the number of times a backedge containing the
4628/// specified less-than comparison will execute.  If not computable, return
4629/// CouldNotCompute.
4630ScalarEvolution::BackedgeTakenInfo
4631ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
4632                                  const Loop *L, bool isSigned) {
4633  // Only handle:  "ADDREC < LoopInvariant".
4634  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
4635
4636  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
4637  if (!AddRec || AddRec->getLoop() != L)
4638    return getCouldNotCompute();
4639
4640  if (AddRec->isAffine()) {
4641    // FORNOW: We only support constant positive strides.
4642    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
4643    const SCEV *Step = AddRec->getStepRecurrence(*this);
4644
4645    // TODO: handle non-constant strides.
4646    const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
4647    if (!CStep || CStep->isZero())
4648      return getCouldNotCompute();
4649    if (CStep->isOne()) {
4650      // With unit stride, the iteration never steps past the limit value.
4651    } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
4652      if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
4653        // Test whether a positive iteration can step past the limit
4654        // value and past the maximum value for its type in a single step.
4655        if (isSigned) {
4656          APInt Max = APInt::getSignedMaxValue(BitWidth);
4657          if ((Max - CStep->getValue()->getValue())
4658                .slt(CLimit->getValue()->getValue()))
4659            return getCouldNotCompute();
4660        } else {
4661          APInt Max = APInt::getMaxValue(BitWidth);
4662          if ((Max - CStep->getValue()->getValue())
4663                .ult(CLimit->getValue()->getValue()))
4664            return getCouldNotCompute();
4665        }
4666      } else
4667        // TODO: handle non-constant limit values below.
4668        return getCouldNotCompute();
4669    } else
4670      // TODO: handle negative strides below.
4671      return getCouldNotCompute();
4672
4673    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
4674    // m.  So, we count the number of iterations in which {n,+,s} < m is true.
4675    // Note that we cannot simply return max(m-n,0)/s because it's not safe to
4676    // treat m-n as either signed or unsigned due to the possibility of overflow.
4677
4678    // First, we get the value of the LHS in the first iteration: n
4679    const SCEV *Start = AddRec->getOperand(0);
4680
4681    // Determine the minimum constant start value.
4682    const SCEV *MinStart = getConstant(isSigned ?
4683      getSignedRange(Start).getSignedMin() :
4684      getUnsignedRange(Start).getUnsignedMin());
4685
4686    // If we know that the condition is true in order to enter the loop,
4687    // then we know that it will run exactly (m-n)/s times. Otherwise, we
4688    // only know that it will execute (max(m,n)-n)/s times. In both cases,
4689    // the division must round up.
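    // For example, a loop "for (i = n; i < m; i += s)" whose entry is guarded
    // by "n < m" runs ceil((m-n)/s) times; without that guard, when n >= m
    // the numerator max(m,n)-n == 0 correctly gives a count of zero.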
4690    const SCEV *End = RHS;
4691    if (!isLoopGuardedByCond(L,
4692                             isSigned ? ICmpInst::ICMP_SLT :
4693                                        ICmpInst::ICMP_ULT,
4694                             getMinusSCEV(Start, Step), RHS))
4695      End = isSigned ? getSMaxExpr(RHS, Start)
4696                     : getUMaxExpr(RHS, Start);
4697
4698    // Determine the maximum constant end value.
4699    const SCEV *MaxEnd = getConstant(isSigned ?
4700      getSignedRange(End).getSignedMax() :
4701      getUnsignedRange(End).getUnsignedMax());
4702
4703    // Finally, we subtract these two values and divide, rounding up, to get
4704    // the number of times the backedge is executed.
4705    const SCEV *BECount = getBECount(Start, End, Step);
4706
4707    // The maximum backedge count is similar, except using the minimum start
4708    // value and the maximum end value.
4709    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step);
4710
4711    return BackedgeTakenInfo(BECount, MaxBECount);
4712  }
4713
4714  return getCouldNotCompute();
4715}
4716
4717/// getNumIterationsInRange - Return the number of iterations of this loop that
4718/// produce values in the specified constant range.  Another way of looking at
4719/// this is that it returns the first iteration number where the value is not in
4720/// the range, thus computing the exit count. If the iteration count can't
4721/// be computed, an instance of SCEVCouldNotCompute is returned.
4722const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
4723                                                    ScalarEvolution &SE) const {
4724  if (Range.isFullSet())  // Infinite loop.
4725    return SE.getCouldNotCompute();
4726
4727  // If the start is a non-zero constant, shift the range to simplify things.
4728  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
4729    if (!SC->getValue()->isZero()) {
4730      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
4731      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
4732      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
4733      if (const SCEVAddRecExpr *ShiftedAddRec =
4734            dyn_cast<SCEVAddRecExpr>(Shifted))
4735        return ShiftedAddRec->getNumIterationsInRange(
4736                           Range.subtract(SC->getValue()->getValue()), SE);
4737      // This is strange and shouldn't happen.
4738      return SE.getCouldNotCompute();
4739    }
4740
4741  // The only time we can solve this is when we have all constant indices.
4742  // Otherwise, we cannot determine the overflow conditions.
4743  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
4744    if (!isa<SCEVConstant>(getOperand(i)))
4745      return SE.getCouldNotCompute();
4746
4747
4748  // Okay, at this point we know that all elements of the chrec are constants and
4749  // that the start element is zero.
4750
4751  // First check to see if the range contains zero.  If not, the first
4752  // iteration exits.
4753  unsigned BitWidth = SE.getTypeSizeInBits(getType());
4754  if (!Range.contains(APInt(BitWidth, 0)))
4755    return SE.getIntegerSCEV(0, getType());
4756
4757  if (isAffine()) {
4758    // If this is an affine expression then we have this situation:
4759    //   Solve {0,+,A} in Range  ===  Ax in Range
4760
4761    // We know that zero is in the range.  If A is positive then we know that
4762    // the upper value of the range must be the first possible exit value.
4763    // If A is negative then the lower of the range is the last possible loop
4764    // value.  Also note that we already checked for a full range.
4765    APInt One(BitWidth,1);
4766    APInt A     = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
4767    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
4768
4769    // The exit value should be (End+A)/A.
4770    APInt ExitVal = (End + A).udiv(A);
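    // For example, solving {0,+,3} in the range [0,10): End = 9 and
    // ExitVal = (9 + 3)/3 = 4; iteration 3 still produces 9 (in range) while
    // iteration 4 produces 12, the first value outside it.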
4771    ConstantInt *ExitValue = SE.getContext()->getConstantInt(ExitVal);
4772
4773    // Evaluate at the exit value.  If we really did fall out of the valid
4774    // range, then we computed our trip count, otherwise wrap around or other
4775    // things must have happened.
4776    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
4777    if (Range.contains(Val->getValue()))
4778      return SE.getCouldNotCompute();  // Something strange happened
4779
4780    // Ensure that the previous value is in the range.  This is a sanity check.
4781    assert(Range.contains(
4782           EvaluateConstantChrecAtConstant(this,
4783           SE.getContext()->getConstantInt(ExitVal - One), SE)->getValue()) &&
4784           "Linear scev computation is off in a bad way!");
4785    return SE.getConstant(ExitValue);
4786  } else if (isQuadratic()) {
4787    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
4788    // quadratic equation to solve it.  To do this, we must frame our problem in
4789    // terms of figuring out when zero is crossed, instead of when
4790    // Range.getUpper() is crossed.
4791    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
4792    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
4793    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
4794
4795    // Next, solve the constructed addrec
4796    std::pair<const SCEV *,const SCEV *> Roots =
4797      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
4798    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4799    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4800    if (R1) {
4801      // Pick the smallest positive root value.
4802      if (ConstantInt *CB =
4803          dyn_cast<ConstantInt>(
4804                       SE.getContext()->getConstantExprICmp(ICmpInst::ICMP_ULT,
4805                         R1->getValue(), R2->getValue()))) {
4806        if (!CB->getZExtValue())
4807          std::swap(R1, R2);   // R1 is the minimum root now.
4808
4809        // Make sure the root is not off by one.  The returned iteration should
4810        // not be in the range, but the previous one should be.  When solving
4811        // for "X*X < 5", for example, we should not return a root of 2.
4812        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
4813                                                             R1->getValue(),
4814                                                             SE);
4815        if (Range.contains(R1Val->getValue())) {
4816          // The next iteration must be out of the range...
4817          ConstantInt *NextVal =
4818                 SE.getContext()->getConstantInt(R1->getValue()->getValue()+1);
4819
4820          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4821          if (!Range.contains(R1Val->getValue()))
4822            return SE.getConstant(NextVal);
4823          return SE.getCouldNotCompute();  // Something strange happened
4824        }
4825
4826        // If R1 was not in the range, then it is a good return value.  Make
4827        // sure that R1-1 WAS in the range though, just in case.
4828        ConstantInt *NextVal =
4829                 SE.getContext()->getConstantInt(R1->getValue()->getValue()-1);
4830        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
4831        if (Range.contains(R1Val->getValue()))
4832          return R1;
4833        return SE.getCouldNotCompute();  // Something strange happened
4834      }
4835    }
4836  }
4837
4838  return SE.getCouldNotCompute();
4839}
4840
4841
4842
4843//===----------------------------------------------------------------------===//
4844//                   SCEVCallbackVH Class Implementation
4845//===----------------------------------------------------------------------===//
4846
4847void ScalarEvolution::SCEVCallbackVH::deleted() {
4848  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4849  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
4850    SE->ConstantEvolutionLoopExitValue.erase(PN);
4851  if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
4852    SE->ValuesAtScopes.erase(I);
4853  SE->Scalars.erase(getValPtr());
4854  // this now dangles!
4855}
4856
4857void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
4858  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
4859
4860  // Forget all the expressions associated with users of the old value,
4861  // so that future queries will recompute the expressions using the new
4862  // value.
4863  SmallVector<User *, 16> Worklist;
4864  Value *Old = getValPtr();
4865  bool DeleteOld = false;
4866  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
4867       UI != UE; ++UI)
4868    Worklist.push_back(*UI);
4869  while (!Worklist.empty()) {
4870    User *U = Worklist.pop_back_val();
4871    // Deleting the Old value will cause this to dangle. Postpone
4872    // that until everything else is done.
4873    if (U == Old) {
4874      DeleteOld = true;
4875      continue;
4876    }
4877    if (PHINode *PN = dyn_cast<PHINode>(U))
4878      SE->ConstantEvolutionLoopExitValue.erase(PN);
4879    if (Instruction *I = dyn_cast<Instruction>(U))
4880      SE->ValuesAtScopes.erase(I);
4881    if (SE->Scalars.erase(U))
4882      for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
4883           UI != UE; ++UI)
4884        Worklist.push_back(*UI);
4885  }
4886  if (DeleteOld) {
4887    if (PHINode *PN = dyn_cast<PHINode>(Old))
4888      SE->ConstantEvolutionLoopExitValue.erase(PN);
4889    if (Instruction *I = dyn_cast<Instruction>(Old))
4890      SE->ValuesAtScopes.erase(I);
4891    SE->Scalars.erase(Old);
4892    // this now dangles!
4893  }
4894  // this may dangle!
4895}
4896
4897ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
4898  : CallbackVH(V), SE(se) {}
4899
4900//===----------------------------------------------------------------------===//
4901//                   ScalarEvolution Class Implementation
4902//===----------------------------------------------------------------------===//
4903
4904ScalarEvolution::ScalarEvolution()
4905  : FunctionPass(&ID) {
4906}
4907
4908bool ScalarEvolution::runOnFunction(Function &F) {
4909  this->F = &F;
4910  LI = &getAnalysis<LoopInfo>();
4911  TD = getAnalysisIfAvailable<TargetData>();
4912  return false;
4913}
4914
4915void ScalarEvolution::releaseMemory() {
4916  Scalars.clear();
4917  BackedgeTakenCounts.clear();
4918  ConstantEvolutionLoopExitValue.clear();
4919  ValuesAtScopes.clear();
4920  UniqueSCEVs.clear();
4921  SCEVAllocator.Reset();
4922}
4923
4924void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
4925  AU.setPreservesAll();
4926  AU.addRequiredTransitive<LoopInfo>();
4927}
4928
4929bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
4930  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
4931}
4932
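// PrintLoopInfo prints, for each loop, lines of the form (illustrative
// output only):
//   Loop bb: backedge-taken count is 100
//   Loop bb: max backedge-taken count is 100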
4933static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
4934                          const Loop *L) {
4935  // Print all inner loops first
4936  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4937    PrintLoopInfo(OS, SE, *I);
4938
4939  OS << "Loop " << L->getHeader()->getName() << ": ";
4940
4941  SmallVector<BasicBlock*, 8> ExitBlocks;
4942  L->getExitBlocks(ExitBlocks);
4943  if (ExitBlocks.size() != 1)
4944    OS << "<multiple exits> ";
4945
4946  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
4947    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
4948  } else {
4949    OS << "Unpredictable backedge-taken count. ";
4950  }
4951
4952  OS << "\n";
4953  OS << "Loop " << L->getHeader()->getName() << ": ";
4954
4955  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
4956    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
4957  } else {
4958    OS << "Unpredictable max backedge-taken count. ";
4959  }
4960
4961  OS << "\n";
4962}
4963
4964void ScalarEvolution::print(raw_ostream &OS, const Module* ) const {
4965  // ScalarEvolution's implementation of the print method is to print
4966  // out SCEV values of all instructions that are interesting. Doing
4967  // this potentially causes it to create new SCEV objects though,
4968  // which technically conflicts with the const qualifier. This isn't
4969  // observable from outside the class though (the hasSCEV function
4970  // notwithstanding), so casting away the const isn't dangerous.
4971  ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);
4972
4973  OS << "Classifying expressions for: " << F->getName() << "\n";
4974  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
4975    if (isSCEVable(I->getType())) {
4976      OS << *I;
4977      OS << "  -->  ";
4978      const SCEV *SV = SE.getSCEV(&*I);
4979      SV->print(OS);
4980
4981      const Loop *L = LI->getLoopFor((*I).getParent());
4982
4983      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
4984      if (AtUse != SV) {
4985        OS << "  -->  ";
4986        AtUse->print(OS);
4987      }
4988
4989      if (L) {
4990        OS << "\t\t" "Exits: ";
4991        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
4992        if (!ExitValue->isLoopInvariant(L)) {
4993          OS << "<<Unknown>>";
4994        } else {
4995          OS << *ExitValue;
4996        }
4997      }
4998
4999      OS << "\n";
5000    }
5001
5002  OS << "Determining loop execution counts for: " << F->getName() << "\n";
5003  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
5004    PrintLoopInfo(OS, &SE, *I);
5005}
5006
5007void ScalarEvolution::print(std::ostream &o, const Module *M) const {
5008  raw_os_ostream OS(o);
5009  print(OS, M);
5010}
5011