ScalarEvolution.cpp revision 7c0fd8eb724a7228a6cf7e3e5487614c25202a91
//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
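//
// For example (illustrative), the canonical induction variable of a loop
// such as "for (i = 0; i != n; ++i)" is represented as the affine
// recurrence {0,+,1}<%loop>: it starts at 0 and steps by 1 per iteration.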
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
                "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
                "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->properlyDominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  const char *OpStr = getOperationStr();
  OS << "(";
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
    OS << **I;
    if (llvm::next(I) != E)
      OS << OpStr;
  }
  OS << ")";
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
    if (!(*I)->dominates(BB, DT))
      return false;
  return true;
}

bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
    if (!(*I)->properlyDominates(BB, DT))
      return false;
  return true;
}

bool SCEVNAryExpr::isLoopInvariant(const Loop *L) const {
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
    if (!(*I)->isLoopInvariant(L))
      return false;
  return true;
}

// hasComputableLoopEvolution - N-ary expressions have computable loop
// evolutions iff they have at least one operand that varies with the loop
// and all varying operands have computable evolutions.
bool SCEVNAryExpr::hasComputableLoopEvolution(const Loop *L) const {
  bool HasVarying = false;
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
    const SCEV *S = *I;
    if (!S->isLoopInvariant(L)) {
      if (S->hasComputableLoopEvolution(L))
        HasVarying = true;
      else
        return false;
    }
  }
  return HasVarying;
}

bool SCEVNAryExpr::hasOperand(const SCEV *O) const {
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
    const SCEV *S = *I;
    if (O == S || S->hasOperand(O))
      return true;
  }
  return false;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L))
    return false;

  // This recurrence is invariant w.r.t. QueryLoop if L contains QueryLoop.
  if (L->contains(QueryLoop))
    return true;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
    if (!(*I)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

bool
SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::dominates(BB, DT);
}

bool
SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  // This uses a "dominates" query instead of a "properly dominates" query
  // because the instruction which produces the addrec's value is a PHI, and
  // a PHI effectively properly dominates its entire containing block.
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::properlyDominates(BB, DT);
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = NumOperands; i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ">";
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->ValuesAtScopes.erase(this);
  SE->UnsignedRanges.erase(this);
  SE->SignedRanges.erase(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(0);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->ValuesAtScopes.erase(this);
  SE->UnsignedRanges.erase(this);
  SE->SignedRanges.erase(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs that point to this
  // SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return L && !L->contains(I);
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->properlyDominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return getValue()->getType();
}

bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (const StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

void SCEVUnknown::print(raw_ostream &OS) const {
  const Type *AllocTy;
  if (isSizeOf(AllocTy)) {
    OS << "sizeof(" << *AllocTy << ")";
    return;
  }
  if (isAlignOf(AllocTy)) {
    OS << "alignof(" << *AllocTy << ")";
    return;
  }

  const Type *CTy;
  Constant *FieldNo;
  if (isOffsetOf(CTy, FieldNo)) {
    OS << "offsetof(" << *CTy << ", ";
    WriteAsOperand(OS, FieldNo, false);
    OS << ")";
    return;
  }

  // Otherwise just print it normally.
  WriteAsOperand(OS, getValue(), false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    const LoopInfo *const LI;
  public:
    explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

    // Return true if LHS should be ordered before RHS, i.e. if the
    // complexity of LHS is less than that of RHS.
    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      return compare(LHS, RHS) < 0;
    }

    // Return negative, zero, or positive, if LHS is less than, equal to, or
    // greater than RHS, respectively. A three-way result allows recursive
    // comparisons to be more efficient.
    int compare(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return 0;

      // Primarily, sort the SCEVs by their getSCEVType().
      unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
      if (LType != RType)
        return (int)LType - (int)RType;

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.
      switch (LType) {
      case scUnknown: {
        const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Sort SCEVUnknown values with some loose heuristics. TODO: This is
        // not as complete as it could be.
        const Value *LV = LU->getValue(), *RV = RU->getValue();

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        bool LIsPointer = LV->getType()->isPointerTy(),
             RIsPointer = RV->getType()->isPointerTy();
        if (LIsPointer != RIsPointer)
          return (int)LIsPointer - (int)RIsPointer;

        // Compare getValueID values.
        unsigned LID = LV->getValueID(),
                 RID = RV->getValueID();
        if (LID != RID)
          return (int)LID - (int)RID;

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LV)) {
          const Argument *RA = cast<Argument>(RV);
          unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
          return (int)LArgNo - (int)RArgNo;
        }

        // For instructions, compare their loop depth, and their operand
        // count.  This is pretty loose.
        if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
          const Instruction *RInst = cast<Instruction>(RV);

          // Compare loop depths.
          const BasicBlock *LParent = LInst->getParent(),
                           *RParent = RInst->getParent();
          if (LParent != RParent) {
            unsigned LDepth = LI->getLoopDepth(LParent),
                     RDepth = LI->getLoopDepth(RParent);
            if (LDepth != RDepth)
              return (int)LDepth - (int)RDepth;
          }

          // Compare the number of operands.
          unsigned LNumOps = LInst->getNumOperands(),
                   RNumOps = RInst->getNumOperands();
          return (int)LNumOps - (int)RNumOps;
        }

        return 0;
      }

      case scConstant: {
        const SCEVConstant *LC = cast<SCEVConstant>(LHS);
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);

        // Compare constant values.
        const APInt &LA = LC->getValue()->getValue();
        const APInt &RA = RC->getValue()->getValue();
        unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
        if (LBitWidth != RBitWidth)
          return (int)LBitWidth - (int)RBitWidth;
        return LA.ult(RA) ? -1 : 1;
      }

      case scAddRecExpr: {
        const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

        // Compare addrec loop depths.
        const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
        if (LLoop != RLoop) {
          unsigned LDepth = LLoop->getLoopDepth(),
                   RDepth = RLoop->getLoopDepth();
          if (LDepth != RDepth)
            return (int)LDepth - (int)RDepth;
        }

        // Addrec complexity grows with operand count.
        unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
        if (LNumOps != RNumOps)
          return (int)LNumOps - (int)RNumOps;

        // Lexicographically compare.
        for (unsigned i = 0; i != LNumOps; ++i) {
          long X = compare(LA->getOperand(i), RA->getOperand(i));
          if (X != 0)
            return X;
        }

        return 0;
      }

      case scAddExpr:
      case scMulExpr:
      case scSMaxExpr:
      case scUMaxExpr: {
        const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

        // Lexicographically compare n-ary expressions.
        unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
        for (unsigned i = 0; i != LNumOps; ++i) {
          if (i >= RNumOps)
            return 1;
          long X = compare(LC->getOperand(i), RC->getOperand(i));
          if (X != 0)
            return X;
        }
        return (int)LNumOps - (int)RNumOps;
      }

      case scUDivExpr: {
        const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

        // Lexicographically compare udiv expressions.
        long X = compare(LC->getLHS(), RC->getLHS());
        if (X != 0)
          return X;
        return compare(LC->getRHS(), RC->getRHS());
      }

      case scTruncate:
      case scZeroExtend:
      case scSignExtend: {
        const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

        // Compare cast expressions by operand.
        return compare(LC->getOperand(), RC->getOperand());
      }

      default:
        break;
      }

      llvm_unreachable("Unknown SCEV kind!");
      return 0;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
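/// For example (illustrative), an operand list (%x, 5, %x) would be
/// reordered to (5, %x, %x): the constant sorts first, and the two
/// occurrences of %x become adjacent so that callers can spot duplicates.
///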
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (SCEVComplexityCompare(LI)(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
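  //
  // As a small worked example (illustrative): for K = 4, K! = 24 = 2^3 * 3,
  // so T = 3 and the odd factor K! / 2^T is 3. With W = 32 we compute the
  // product It*(It-1)*(It-2)*(It-3) at W+T = 35 bits, divide by 2^3,
  // truncate to 32 bits, and multiply by the multiplicative inverse of 3
  // modulo 2^32.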

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                      CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
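///
/// For example (illustrative), the affine recurrence {A,+,B} evaluates at
/// iteration It to A + B*It, since BC(It, 1) == It.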
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
                                               getEffectiveSCEVType(Ty))));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // As a special case, fold trunc(undef) to undef. We don't want to
  // know too much about SCEVUnknowns, but this special case is handy
  // and harmless.
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
    if (isa<UndefValue>(U->getValue()))
      return getSCEV(UndefValue::get(Ty));

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
                                              getEffectiveSCEVType(Ty))));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
                                              getEffectiveSCEVType(Ty))));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(getSignExtendExpr(Start, Ty),
                             getSignExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
          Add = getAddExpr(Start, UMul);
          OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                      getSignedRange(Step).getSignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      Ops.push_back(getAnyExtendExpr(*I, Ty));
    return getAddRecExpr(Ops, AR->getLoop());
  }

  // As a special case, fold anyext(undef) to undef. We don't want to
  // know too much about SCEVUnknowns, but this special case is handy
  // and harmless.
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
    if (isa<UndefValue>(U->getValue()))
      return getSCEV(UndefValue::get(Ty));

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getValue()->getValue();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       Add->op_begin(), Add->getNumOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        bool HasNUW, bool HasNSW) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
         E = Ops.end(); I != E; ++I)
      if (!isKnownNonNegative(*I)) {
        All = false;
        break;
1395      }
1396    if (All) HasNUW = true;
1397  }
1398
1399  // Sort by complexity; this groups all similar expression types together.
1400  GroupByComplexity(Ops, LI);
1401
1402  // If there are any constants, fold them together.
1403  unsigned Idx = 0;
1404  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1405    ++Idx;
1406    assert(Idx < Ops.size());
1407    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1408      // We found two constants, fold them together!
1409      Ops[0] = getConstant(LHSC->getValue()->getValue() +
1410                           RHSC->getValue()->getValue());
1411      if (Ops.size() == 2) return Ops[0];
1412      Ops.erase(Ops.begin()+1);  // Erase the folded element
1413      LHSC = cast<SCEVConstant>(Ops[0]);
1414    }
1415
1416    // If we are left with a constant zero being added, strip it off.
1417    if (LHSC->getValue()->isZero()) {
1418      Ops.erase(Ops.begin());
1419      --Idx;
1420    }
1421
1422    if (Ops.size() == 1) return Ops[0];
1423  }
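  // For example (%x being a placeholder non-constant operand), the folding
  // above turns an operand list {5, -5, %x} into {0, %x}, the zero is then
  // stripped, and %x is returned directly.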
1424
1425  // Okay, check to see if the same value occurs in the operand list more than
1426  // once.  If so, merge them together into a multiply expression.  Since we
1427  // sorted the list, these values are required to be adjacent.
1428  const Type *Ty = Ops[0]->getType();
1429  bool FoundMatch = false;
1430  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
1431    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
1432      // Scan ahead to count how many equal operands there are.
1433      unsigned Count = 2;
1434      while (i+Count != e && Ops[i+Count] == Ops[i])
1435        ++Count;
1436      // Merge the values into a multiply.
1437      const SCEV *Scale = getConstant(Ty, Count);
1438      const SCEV *Mul = getMulExpr(Scale, Ops[i]);
1439      if (Ops.size() == Count)
1440        return Mul;
1441      Ops[i] = Mul;
1442      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
1443      --i; e -= Count - 1;
1444      FoundMatch = true;
1445    }
1446  if (FoundMatch)
1447    return getAddExpr(Ops, HasNUW, HasNSW);
1448
1449  // Check for truncates. If all the operands are truncated from the same
1450  // type, see if factoring out the truncate would permit the result to be
1451  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1452  // if the contents of the resulting outer trunc fold to something simple.
1453  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
1454    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
1455    const Type *DstType = Trunc->getType();
1456    const Type *SrcType = Trunc->getOperand()->getType();
1457    SmallVector<const SCEV *, 8> LargeOps;
1458    bool Ok = true;
1459    // Check all the operands to see if they can be represented in the
1460    // source type of the truncate.
1461    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1462      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
1463        if (T->getOperand()->getType() != SrcType) {
1464          Ok = false;
1465          break;
1466        }
1467        LargeOps.push_back(T->getOperand());
1468      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1469        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
1470      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
1471        SmallVector<const SCEV *, 8> LargeMulOps;
1472        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
1473          if (const SCEVTruncateExpr *T =
1474                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
1475            if (T->getOperand()->getType() != SrcType) {
1476              Ok = false;
1477              break;
1478            }
1479            LargeMulOps.push_back(T->getOperand());
1480          } else if (const SCEVConstant *C =
1481                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
1482            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
1483          } else {
1484            Ok = false;
1485            break;
1486          }
1487        }
1488        if (Ok)
1489          LargeOps.push_back(getMulExpr(LargeMulOps));
1490      } else {
1491        Ok = false;
1492        break;
1493      }
1494    }
1495    if (Ok) {
1496      // Evaluate the expression in the larger type.
1497      const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW);
1498      // If it folds to something simple, use it. Otherwise, don't.
1499      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
1500        return getTruncateExpr(Fold, DstType);
1501    }
1502  }
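  // For example (%x being a placeholder i64 value), trunc(%x) + (-1 * trunc(%x))
  // re-evaluates in i64 as %x + -1*%x, which folds to the constant 0, so the
  // whole expression becomes trunc(0), i.e. 0.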
1503
1504  // Skip past any other cast SCEVs.
1505  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
1506    ++Idx;
1507
1508  // If there are add operands they would be next.
1509  if (Idx < Ops.size()) {
1510    bool DeletedAdd = false;
1511    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
1512      // If we have an add, expand the add operands onto the end of the operands
1513      // list.
1514      Ops.erase(Ops.begin()+Idx);
1515      Ops.append(Add->op_begin(), Add->op_end());
1516      DeletedAdd = true;
1517    }
1518
1519    // If we deleted at least one add, we added operands to the end of the list,
1520    // and they are not necessarily sorted.  Recurse to resort and resimplify
1521    // any operands we just acquired.
1522    if (DeletedAdd)
1523      return getAddExpr(Ops);
1524  }
1525
1526  // Skip over the add expression until we get to a multiply.
1527  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1528    ++Idx;
1529
1530  // Check to see if there are any folding opportunities present with
1531  // operands multiplied by constant values.
1532  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1533    uint64_t BitWidth = getTypeSizeInBits(Ty);
1534    DenseMap<const SCEV *, APInt> M;
1535    SmallVector<const SCEV *, 8> NewOps;
1536    APInt AccumulatedConstant(BitWidth, 0);
1537    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1538                                     Ops.data(), Ops.size(),
1539                                     APInt(BitWidth, 1), *this)) {
1540      // Some interesting folding opportunity is present, so it's worthwhile to
1541      // re-generate the operands list. Group the operands by constant scale,
1542      // to avoid multiplying by the same constant scale multiple times.
1543      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1544      for (SmallVector<const SCEV *, 8>::const_iterator I = NewOps.begin(),
1545           E = NewOps.end(); I != E; ++I)
1546        MulOpLists[M.find(*I)->second].push_back(*I);
1547      // Re-generate the operands list.
1548      Ops.clear();
1549      if (AccumulatedConstant != 0)
1550        Ops.push_back(getConstant(AccumulatedConstant));
1551      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1552           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1553        if (I->first != 0)
1554          Ops.push_back(getMulExpr(getConstant(I->first),
1555                                   getAddExpr(I->second)));
1556      if (Ops.empty())
1557        return getConstant(Ty, 0);
1558      if (Ops.size() == 1)
1559        return Ops[0];
1560      return getAddExpr(Ops);
1561    }
1562  }
1563
1564  // If we are adding something to a multiply expression, make sure the
1565  // something is not already an operand of the multiply.  If so, merge it into
1566  // the multiply.
1567  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1568    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1569    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1570      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1571      if (isa<SCEVConstant>(MulOpSCEV))
1572        continue;
1573      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1574        if (MulOpSCEV == Ops[AddOp]) {
1575          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
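          // getOperand(MulOp == 0) picks the multiply's other operand in the
          // two-operand case: operand 1 when MulOp is operand 0, and
          // operand 0 otherwise.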
1576          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1577          if (Mul->getNumOperands() != 2) {
1578            // If the multiply has more than two operands, we must get the
1579            // Y*Z term.
1580            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1581                                                Mul->op_begin()+MulOp);
1582            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
1583            InnerMul = getMulExpr(MulOps);
1584          }
1585          const SCEV *One = getConstant(Ty, 1);
1586          const SCEV *AddOne = getAddExpr(One, InnerMul);
1587          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
1588          if (Ops.size() == 2) return OuterMul;
1589          if (AddOp < Idx) {
1590            Ops.erase(Ops.begin()+AddOp);
1591            Ops.erase(Ops.begin()+Idx-1);
1592          } else {
1593            Ops.erase(Ops.begin()+Idx);
1594            Ops.erase(Ops.begin()+AddOp-1);
1595          }
1596          Ops.push_back(OuterMul);
1597          return getAddExpr(Ops);
1598        }
1599
1600      // Check this multiply against other multiplies being added together.
1601      for (unsigned OtherMulIdx = Idx+1;
1602           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1603           ++OtherMulIdx) {
1604        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1605        // If MulOp occurs in OtherMul, we can fold the two multiplies
1606        // together.
1607        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1608             OMulOp != e; ++OMulOp)
1609          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1610            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1611            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1612            if (Mul->getNumOperands() != 2) {
1613              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1614                                                  Mul->op_begin()+MulOp);
1615              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
1616              InnerMul1 = getMulExpr(MulOps);
1617            }
1618            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1619            if (OtherMul->getNumOperands() != 2) {
1620              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1621                                                  OtherMul->op_begin()+OMulOp);
1622              MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
1623              InnerMul2 = getMulExpr(MulOps);
1624            }
1625            const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1626            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1627            if (Ops.size() == 2) return OuterMul;
1628            Ops.erase(Ops.begin()+Idx);
1629            Ops.erase(Ops.begin()+OtherMulIdx-1);
1630            Ops.push_back(OuterMul);
1631            return getAddExpr(Ops);
1632          }
1633      }
1634    }
1635  }
1636
1637  // If there are any add recurrences in the operands list, see if any other
1638  // added values are loop invariant.  If so, we can fold them into the
1639  // recurrence.
1640  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1641    ++Idx;
1642
1643  // Scan over all recurrences, trying to fold loop invariants into them.
1644  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1645    // Scan all of the other operands to this add and add them to the vector if
1646    // they are loop invariant w.r.t. the recurrence.
1647    SmallVector<const SCEV *, 8> LIOps;
1648    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1649    const Loop *AddRecLoop = AddRec->getLoop();
1650    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1651      if (Ops[i]->isLoopInvariant(AddRecLoop)) {
1652        LIOps.push_back(Ops[i]);
1653        Ops.erase(Ops.begin()+i);
1654        --i; --e;
1655      }
1656
1657    // If we found some loop invariants, fold them into the recurrence.
1658    if (!LIOps.empty()) {
1659      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
1660      LIOps.push_back(AddRec->getStart());
1661
1662      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1663                                             AddRec->op_end());
1664      AddRecOps[0] = getAddExpr(LIOps);
1665
1666      // Build the new addrec. Propagate the NUW and NSW flags if both the
1667      // outer add and the inner addrec are guaranteed to have no overflow.
1668      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop,
1669                                         HasNUW && AddRec->hasNoUnsignedWrap(),
1670                                         HasNSW && AddRec->hasNoSignedWrap());
1671
1672      // If all of the other operands were loop invariant, we are done.
1673      if (Ops.size() == 1) return NewRec;
1674
1675      // Otherwise, add the folded AddRec and the non-loop-invariant parts.
1676      for (unsigned i = 0;; ++i)
1677        if (Ops[i] == AddRec) {
1678          Ops[i] = NewRec;
1679          break;
1680        }
1681      return getAddExpr(Ops);
1682    }
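    // For example, if %a is loop-invariant in L, the fold above rewrites
    //   %a + {%b,+,1}<L>  -->  {%a+%b,+,1}<L>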
1683
1684    // Okay, if there weren't any loop invariants to be folded, check to see if
1685    // there are multiple AddRec's with the same loop induction variable being
1686    // added together.  If so, we can fold them.
1687    for (unsigned OtherIdx = Idx+1;
1688         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1689         ++OtherIdx)
1690      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
1691        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
1692        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1693                                               AddRec->op_end());
1694        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1695             ++OtherIdx)
1696          if (const SCEVAddRecExpr *OtherAddRec =
1697                dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
1698            if (OtherAddRec->getLoop() == AddRecLoop) {
1699              for (unsigned i = 0, e = OtherAddRec->getNumOperands();
1700                   i != e; ++i) {
1701                if (i >= AddRecOps.size()) {
1702                  AddRecOps.append(OtherAddRec->op_begin()+i,
1703                                   OtherAddRec->op_end());
1704                  break;
1705                }
1706                AddRecOps[i] = getAddExpr(AddRecOps[i],
1707                                          OtherAddRec->getOperand(i));
1708              }
1709              Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
1710            }
1711        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop);
1712        return getAddExpr(Ops);
1713      }
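    // For example, {1,+,2}<L> + {3,+,4}<L> combines element-wise into
    // {4,+,6}<L>.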
1714
1715    // Otherwise couldn't fold anything into this recurrence.  Move on to the
1716    // next one.
1717  }
1718
1719  // Okay, it looks like we really DO need an add expr.  Check to see if we
1720  // already have one, otherwise create a new one.
1721  FoldingSetNodeID ID;
1722  ID.AddInteger(scAddExpr);
1723  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1724    ID.AddPointer(Ops[i]);
1725  void *IP = 0;
1726  SCEVAddExpr *S =
1727    static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1728  if (!S) {
1729    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1730    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1731    S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
1732                                        O, Ops.size());
1733    UniqueSCEVs.InsertNode(S, IP);
1734  }
1735  if (HasNUW) S->setHasNoUnsignedWrap(true);
1736  if (HasNSW) S->setHasNoSignedWrap(true);
1737  return S;
1738}
1739
1740/// getMulExpr - Get a canonical multiply expression, or something simpler if
1741/// possible.
1742const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
1743                                        bool HasNUW, bool HasNSW) {
1744  assert(!Ops.empty() && "Cannot get empty mul!");
1745  if (Ops.size() == 1) return Ops[0];
1746#ifndef NDEBUG
1747  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1748  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1749    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1750           "SCEVMulExpr operand types don't match!");
1751#endif
1752
1753  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
1754  if (!HasNUW && HasNSW) {
1755    bool All = true;
1756    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
1757         E = Ops.end(); I != E; ++I)
1758      if (!isKnownNonNegative(*I)) {
1759        All = false;
1760        break;
1761      }
1762    if (All) HasNUW = true;
1763  }
1764
1765  // Sort by complexity; this groups all similar expression types together.
1766  GroupByComplexity(Ops, LI);
1767
1768  // If there are any constants, fold them together.
1769  unsigned Idx = 0;
1770  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1771
1772    // C1*(C2+V) -> C1*C2 + C1*V
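    // (e.g., 3*(2 + %v) --> 6 + 3*%v, exposing the constant for further
    // reassociation; %v is a placeholder non-constant value)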
1773    if (Ops.size() == 2)
1774      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1775        if (Add->getNumOperands() == 2 &&
1776            isa<SCEVConstant>(Add->getOperand(0)))
1777          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1778                            getMulExpr(LHSC, Add->getOperand(1)));
1779
1780    ++Idx;
1781    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1782      // We found two constants, fold them together!
1783      ConstantInt *Fold = ConstantInt::get(getContext(),
1784                                           LHSC->getValue()->getValue() *
1785                                           RHSC->getValue()->getValue());
1786      Ops[0] = getConstant(Fold);
1787      Ops.erase(Ops.begin()+1);  // Erase the folded element
1788      if (Ops.size() == 1) return Ops[0];
1789      LHSC = cast<SCEVConstant>(Ops[0]);
1790    }
1791
1792    // If we are left with a constant one being multiplied, strip it off.
1793    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1794      Ops.erase(Ops.begin());
1795      --Idx;
1796    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1797      // If we have a multiply of zero, it will always be zero.
1798      return Ops[0];
1799    } else if (Ops[0]->isAllOnesValue()) {
1800      // If we have a mul by -1 of an add, try distributing the -1 among the
1801      // add operands.
1802      if (Ops.size() == 2)
1803        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
1804          SmallVector<const SCEV *, 4> NewOps;
1805          bool AnyFolded = false;
1806          for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
1807               I != E; ++I) {
1808            const SCEV *Mul = getMulExpr(Ops[0], *I);
1809            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
1810            NewOps.push_back(Mul);
1811          }
1812          if (AnyFolded)
1813            return getAddExpr(NewOps);
1814        }
1815    }
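    // For example, -1 * (%a + 20) distributes to -20 + -1*%a; folding -1*20
    // into a constant is what makes the distribution worthwhile.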
1816
1817    if (Ops.size() == 1)
1818      return Ops[0];
1819  }
1820
1821  // Skip over the add expression until we get to a multiply.
1822  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1823    ++Idx;
1824
1825  // If there are mul operands inline them all into this expression.
1826  if (Idx < Ops.size()) {
1827    bool DeletedMul = false;
1828    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1829      // If we have a mul, expand the mul operands onto the end of the operands
1830      // list.
1831      Ops.erase(Ops.begin()+Idx);
1832      Ops.append(Mul->op_begin(), Mul->op_end());
1833      DeletedMul = true;
1834    }
1835
1836    // If we deleted at least one mul, we added operands to the end of the list,
1837    // and they are not necessarily sorted.  Recurse to resort and resimplify
1838    // any operands we just acquired.
1839    if (DeletedMul)
1840      return getMulExpr(Ops);
1841  }
1842
1843  // If there are any add recurrences in the operands list, see if any other
1844  // multiplied values are loop invariant.  If so, we can fold them into the
1845  // recurrence.
1846  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1847    ++Idx;
1848
1849  // Scan over all recurrences, trying to fold loop invariants into them.
1850  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1851    // Scan all of the other operands to this mul and add them to the vector if
1852    // they are loop invariant w.r.t. the recurrence.
1853    SmallVector<const SCEV *, 8> LIOps;
1854    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1855    const Loop *AddRecLoop = AddRec->getLoop();
1856    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1857      if (Ops[i]->isLoopInvariant(AddRecLoop)) {
1858        LIOps.push_back(Ops[i]);
1859        Ops.erase(Ops.begin()+i);
1860        --i; --e;
1861      }
1862
1863    // If we found some loop invariants, fold them into the recurrence.
1864    if (!LIOps.empty()) {
1865      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
1866      SmallVector<const SCEV *, 4> NewOps;
1867      NewOps.reserve(AddRec->getNumOperands());
1868      const SCEV *Scale = getMulExpr(LIOps);
1869      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1870        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1871
1872      // Build the new addrec. Propagate the NUW and NSW flags if both the
1873      // outer mul and the inner addrec are guaranteed to have no overflow.
1874      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop,
1875                                         HasNUW && AddRec->hasNoUnsignedWrap(),
1876                                         HasNSW && AddRec->hasNoSignedWrap());
1877
1878      // If all of the other operands were loop invariant, we are done.
1879      if (Ops.size() == 1) return NewRec;
1880
1881      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
1882      for (unsigned i = 0;; ++i)
1883        if (Ops[i] == AddRec) {
1884          Ops[i] = NewRec;
1885          break;
1886        }
1887      return getMulExpr(Ops);
1888    }
1889
1890    // Okay, if there weren't any loop invariants to be folded, check to see if
1891    // there are multiple AddRec's with the same loop induction variable being
1892    // multiplied together.  If so, we can fold them.
1893    for (unsigned OtherIdx = Idx+1;
1894         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1895         ++OtherIdx)
1896      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
1897        // F * G, where F = {A,+,B}<L> and G = {C,+,D}<L>  -->
1898        // {A*C,+,F*D + G*B + B*D}<L>
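        // For example, {0,+,1}<L> * {0,+,1}<L> (i.e. i*i) becomes
        // {0,+,1,+,2}<L>, matching i^2 as the sum of the first i odd numbers.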
1899        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1900             ++OtherIdx)
1901          if (const SCEVAddRecExpr *OtherAddRec =
1902                dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
1903            if (OtherAddRec->getLoop() == AddRecLoop) {
1904              const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1905              const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart());
1906              const SCEV *B = F->getStepRecurrence(*this);
1907              const SCEV *D = G->getStepRecurrence(*this);
1908              const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
1909                                               getMulExpr(G, B),
1910                                               getMulExpr(B, D));
1911              const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
1912                                                    F->getLoop());
1913              if (Ops.size() == 2) return NewAddRec;
1914              Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
1915              Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
1916            }
1917        return getMulExpr(Ops);
1918      }
1919
1920    // Otherwise couldn't fold anything into this recurrence.  Move on to the
1921    // next one.
1922  }
1923
1924  // Okay, it looks like we really DO need a mul expr.  Check to see if we
1925  // already have one, otherwise create a new one.
1926  FoldingSetNodeID ID;
1927  ID.AddInteger(scMulExpr);
1928  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1929    ID.AddPointer(Ops[i]);
1930  void *IP = 0;
1931  SCEVMulExpr *S =
1932    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1933  if (!S) {
1934    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1935    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1936    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
1937                                        O, Ops.size());
1938    UniqueSCEVs.InsertNode(S, IP);
1939  }
1940  if (HasNUW) S->setHasNoUnsignedWrap(true);
1941  if (HasNSW) S->setHasNoSignedWrap(true);
1942  return S;
1943}
1944
1945/// getUDivExpr - Get a canonical unsigned division expression, or something
1946/// simpler if possible.
1947const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1948                                         const SCEV *RHS) {
1949  assert(getEffectiveSCEVType(LHS->getType()) ==
1950         getEffectiveSCEVType(RHS->getType()) &&
1951         "SCEVUDivExpr operand types don't match!");
1952
1953  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1954    if (RHSC->getValue()->equalsInt(1))
1955      return LHS;                               // X udiv 1 --> X
1956    // If the denominator is zero, the result of the udiv is undefined. Don't
1957    // try to analyze it, because the resolution chosen here may differ from
1958    // the resolution chosen in other parts of the compiler.
1959    if (!RHSC->getValue()->isZero()) {
1960      // Determine if the division can be folded into the operands of
1961      // its dividend (LHS).
1962      // TODO: Generalize this to non-constants by using known-bits information.
1963      const Type *Ty = LHS->getType();
1964      unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1965      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
1966      // For non-power-of-two values, effectively round the value up to the
1967      // nearest power of two.
1968      if (!RHSC->getValue()->getValue().isPowerOf2())
1969        ++MaxShiftAmt;
1970      const IntegerType *ExtTy =
1971        IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
1972      // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
1973      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1974        if (const SCEVConstant *Step =
1975              dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1976          if (!Step->getValue()->getValue()
1977                .urem(RHSC->getValue()->getValue()) &&
1978              getZeroExtendExpr(AR, ExtTy) ==
1979              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1980                            getZeroExtendExpr(Step, ExtTy),
1981                            AR->getLoop())) {
1982            SmallVector<const SCEV *, 4> Operands;
1983            for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1984              Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1985            return getAddRecExpr(Operands, AR->getLoop());
1986          }
1987      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1988      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1989        SmallVector<const SCEV *, 4> Operands;
1990        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1991          Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1992        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1993          // Find an operand that's safely divisible.
1994          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1995            const SCEV *Op = M->getOperand(i);
1996            const SCEV *Div = getUDivExpr(Op, RHSC);
1997            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1998              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
1999                                                      M->op_end());
2000              Operands[i] = Div;
2001              return getMulExpr(Operands);
2002            }
2003          }
2004      }
2005      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
2006      if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
2007        SmallVector<const SCEV *, 4> Operands;
2008        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
2009          Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
2010        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
2011          Operands.clear();
2012          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
2013            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
2014            if (isa<SCEVUDivExpr>(Op) ||
2015                getMulExpr(Op, RHS) != A->getOperand(i))
2016              break;
2017            Operands.push_back(Op);
2018          }
2019          if (Operands.size() == A->getNumOperands())
2020            return getAddExpr(Operands);
2021        }
2022      }
2023
2024      // Fold if both operands are constant.
2025      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
2026        Constant *LHSCV = LHSC->getValue();
2027        Constant *RHSCV = RHSC->getValue();
2028        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
2029                                                                   RHSCV)));
2030      }
2031    }
2032  }
2033
2034  FoldingSetNodeID ID;
2035  ID.AddInteger(scUDivExpr);
2036  ID.AddPointer(LHS);
2037  ID.AddPointer(RHS);
2038  void *IP = 0;
2039  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2040  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
2041                                             LHS, RHS);
2042  UniqueSCEVs.InsertNode(S, IP);
2043  return S;
2044}
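// For example, getUDivExpr({0,+,8}<L>, 4) can fold to {0,+,2}<L>: the step 8
// is divisible by 4, and the zext-based comparison above checks that the
// division distributes over the recurrence without altering wrapped values.
// When no fold applies, a plain SCEVUDivExpr node is returned.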
2045
2046
2047/// getAddRecExpr - Get an add recurrence expression for the specified loop.
2048/// Simplify the expression as much as possible.
2049const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
2050                                           const SCEV *Step, const Loop *L,
2051                                           bool HasNUW, bool HasNSW) {
2052  SmallVector<const SCEV *, 4> Operands;
2053  Operands.push_back(Start);
2054  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
2055    if (StepChrec->getLoop() == L) {
2056      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
2057      return getAddRecExpr(Operands, L);
2058    }
2059
2060  Operands.push_back(Step);
2061  return getAddRecExpr(Operands, L, HasNUW, HasNSW);
2062}
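// For example, getAddRecExpr(%x, {%a,+,%b}<L>, L) flattens the recurrent step
// into a single higher-order recurrence, {%x,+,%a,+,%b}<L>.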
2063
2064/// getAddRecExpr - Get an add recurrence expression for the specified loop.
2065/// Simplify the expression as much as possible.
2066const SCEV *
2067ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2068                               const Loop *L,
2069                               bool HasNUW, bool HasNSW) {
2070  if (Operands.size() == 1) return Operands[0];
2071#ifndef NDEBUG
2072  const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2073  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2074    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2075           "SCEVAddRecExpr operand types don't match!");
2076#endif
2077
2078  if (Operands.back()->isZero()) {
2079    Operands.pop_back();
2080    return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0}  -->  X
2081  }
2082
2083  // It's tempting to call getMaxBackedgeTakenCount here and
2084  // use that information to infer NUW and NSW flags. However, computing a
2085  // BE count requires calling getAddRecExpr, so we may not yet have a
2086  // meaningful BE count at this point (and if we don't, we'd be stuck
2087  // with a SCEVCouldNotCompute as the cached BE count).
2088
2089  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
2090  if (!HasNUW && HasNSW) {
2091    bool All = true;
2092    for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
2093         E = Operands.end(); I != E; ++I)
2094      if (!isKnownNonNegative(*I)) {
2095        All = false;
2096        break;
2097      }
2098    if (All) HasNUW = true;
2099  }
2100
2101  // Canonicalize nested AddRecs by nesting them in order of loop depth.
2102  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
2103    const Loop *NestedLoop = NestedAR->getLoop();
2104    if (L->contains(NestedLoop) ?
2105        (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
2106        (!NestedLoop->contains(L) &&
2107         DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
2108      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
2109                                                  NestedAR->op_end());
2110      Operands[0] = NestedAR->getStart();
2111      // AddRecs require their operands be loop-invariant with respect to their
2112      // loops. Don't perform this transformation if it would break this
2113      // requirement.
2114      bool AllInvariant = true;
2115      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2116        if (!Operands[i]->isLoopInvariant(L)) {
2117          AllInvariant = false;
2118          break;
2119        }
2120      if (AllInvariant) {
2121        NestedOperands[0] = getAddRecExpr(Operands, L);
2122        AllInvariant = true;
2123        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
2124          if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
2125            AllInvariant = false;
2126            break;
2127          }
2128        if (AllInvariant)
2129          // Ok, both add recurrences are valid after the transformation.
2130          return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW);
2131      }
2132      // Reset Operands to its original state.
2133      Operands[0] = NestedAR;
2134    }
2135  }
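  // For example, with L2 nested inside L1, the canonicalization above turns
  // {{X,+,Y}<L2>,+,Z}<L1> into {{X,+,Z}<L1>,+,Y}<L2>, provided the operand
  // invariance checks pass.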
2136
2137  // Okay, it looks like we really DO need an addrec expr.  Check to see if we
2138  // already have one, otherwise create a new one.
2139  FoldingSetNodeID ID;
2140  ID.AddInteger(scAddRecExpr);
2141  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2142    ID.AddPointer(Operands[i]);
2143  ID.AddPointer(L);
2144  void *IP = 0;
2145  SCEVAddRecExpr *S =
2146    static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2147  if (!S) {
2148    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2149    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
2150    S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2151                                           O, Operands.size(), L);
2152    UniqueSCEVs.InsertNode(S, IP);
2153  }
2154  if (HasNUW) S->setHasNoUnsignedWrap(true);
2155  if (HasNSW) S->setHasNoSignedWrap(true);
2156  return S;
2157}
2158
2159const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
2160                                         const SCEV *RHS) {
2161  SmallVector<const SCEV *, 2> Ops;
2162  Ops.push_back(LHS);
2163  Ops.push_back(RHS);
2164  return getSMaxExpr(Ops);
2165}
2166
2167const SCEV *
2168ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2169  assert(!Ops.empty() && "Cannot get empty smax!");
2170  if (Ops.size() == 1) return Ops[0];
2171#ifndef NDEBUG
2172  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2173  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2174    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2175           "SCEVSMaxExpr operand types don't match!");
2176#endif
2177
2178  // Sort by complexity; this groups all similar expression types together.
2179  GroupByComplexity(Ops, LI);
2180
2181  // If there are any constants, fold them together.
2182  unsigned Idx = 0;
2183  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2184    ++Idx;
2185    assert(Idx < Ops.size());
2186    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2187      // We found two constants, fold them together!
2188      ConstantInt *Fold = ConstantInt::get(getContext(),
2189                              APIntOps::smax(LHSC->getValue()->getValue(),
2190                                             RHSC->getValue()->getValue()));
2191      Ops[0] = getConstant(Fold);
2192      Ops.erase(Ops.begin()+1);  // Erase the folded element
2193      if (Ops.size() == 1) return Ops[0];
2194      LHSC = cast<SCEVConstant>(Ops[0]);
2195    }
2196
2197    // If we are left with a constant minimum-int, strip it off.
2198    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
2199      Ops.erase(Ops.begin());
2200      --Idx;
2201    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
2202      // If we have an smax with a constant maximum-int, it will always be
2203      // maximum-int.
2204      return Ops[0];
2205    }
2206
2207    if (Ops.size() == 1) return Ops[0];
2208  }
2209
2210  // Find the first SMax
2211  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
2212    ++Idx;
2213
2214  // Check to see if one of the operands is an SMax. If so, expand its operands
2215  // onto our operand list, and recurse to simplify.
2216  if (Idx < Ops.size()) {
2217    bool DeletedSMax = false;
2218    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
2219      Ops.erase(Ops.begin()+Idx);
2220      Ops.append(SMax->op_begin(), SMax->op_end());
2221      DeletedSMax = true;
2222    }
2223
2224    if (DeletedSMax)
2225      return getSMaxExpr(Ops);
2226  }
2227
2228  // Okay, check to see if the same value occurs in the operand list twice.  If
2229  // so, delete one.  Since we sorted the list, these values are required to
2230  // be adjacent.
2231  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2232    //  X smax Y smax Y  -->  X smax Y
2233    //  X smax Y         -->  X, if X is always greater than or equal to Y
2234    if (Ops[i] == Ops[i+1] ||
2235        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
2236      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2237      --i; --e;
2238    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
2239      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2240      --i; --e;
2241    }
2242
2243  if (Ops.size() == 1) return Ops[0];
2244
2245  assert(!Ops.empty() && "Reduced smax down to nothing!");
2246
2247  // Okay, it looks like we really DO need an smax expr.  Check to see if we
2248  // already have one, otherwise create a new one.
2249  FoldingSetNodeID ID;
2250  ID.AddInteger(scSMaxExpr);
2251  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2252    ID.AddPointer(Ops[i]);
2253  void *IP = 0;
2254  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2255  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2256  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2257  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
2258                                             O, Ops.size());
2259  UniqueSCEVs.InsertNode(S, IP);
2260  return S;
2261}
2262
2263const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
2264                                         const SCEV *RHS) {
2265  SmallVector<const SCEV *, 2> Ops;
2266  Ops.push_back(LHS);
2267  Ops.push_back(RHS);
2268  return getUMaxExpr(Ops);
2269}
2270
2271const SCEV *
2272ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2273  assert(!Ops.empty() && "Cannot get empty umax!");
2274  if (Ops.size() == 1) return Ops[0];
2275#ifndef NDEBUG
2276  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2277  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2278    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2279           "SCEVUMaxExpr operand types don't match!");
2280#endif
2281
2282  // Sort by complexity; this groups all similar expression types together.
2283  GroupByComplexity(Ops, LI);
2284
2285  // If there are any constants, fold them together.
2286  unsigned Idx = 0;
2287  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2288    ++Idx;
2289    assert(Idx < Ops.size());
2290    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2291      // We found two constants, fold them together!
2292      ConstantInt *Fold = ConstantInt::get(getContext(),
2293                              APIntOps::umax(LHSC->getValue()->getValue(),
2294                                             RHSC->getValue()->getValue()));
2295      Ops[0] = getConstant(Fold);
2296      Ops.erase(Ops.begin()+1);  // Erase the folded element
2297      if (Ops.size() == 1) return Ops[0];
2298      LHSC = cast<SCEVConstant>(Ops[0]);
2299    }
2300
2301    // If we are left with a constant minimum-int, strip it off.
2302    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
2303      Ops.erase(Ops.begin());
2304      --Idx;
2305    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
2306      // If we have an umax with a constant maximum-int, it will always be
2307      // maximum-int.
2308      return Ops[0];
2309    }
2310
2311    if (Ops.size() == 1) return Ops[0];
2312  }
2313
2314  // Find the first UMax
2315  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
2316    ++Idx;
2317
2318  // Check to see if one of the operands is a UMax. If so, expand its operands
2319  // onto our operand list, and recurse to simplify.
2320  if (Idx < Ops.size()) {
2321    bool DeletedUMax = false;
2322    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
2323      Ops.erase(Ops.begin()+Idx);
2324      Ops.append(UMax->op_begin(), UMax->op_end());
2325      DeletedUMax = true;
2326    }
2327
2328    if (DeletedUMax)
2329      return getUMaxExpr(Ops);
2330  }
2331
2332  // Okay, check to see if the same value occurs in the operand list twice.  If
2333  // so, delete one.  Since we sorted the list, these values are required to
2334  // be adjacent.
2335  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2336    //  X umax Y umax Y  -->  X umax Y
2337    //  X umax Y         -->  X, if X is always greater than or equal to Y
2338    if (Ops[i] == Ops[i+1] ||
2339        isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
2340      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2341      --i; --e;
2342    } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
2343      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2344      --i; --e;
2345    }
2346
2347  if (Ops.size() == 1) return Ops[0];
2348
2349  assert(!Ops.empty() && "Reduced umax down to nothing!");
2350
2351  // Okay, it looks like we really DO need a umax expr.  Check to see if we
2352  // already have one, otherwise create a new one.
2353  FoldingSetNodeID ID;
2354  ID.AddInteger(scUMaxExpr);
2355  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2356    ID.AddPointer(Ops[i]);
2357  void *IP = 0;
2358  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2359  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2360  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2361  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
2362                                             O, Ops.size());
2363  UniqueSCEVs.InsertNode(S, IP);
2364  return S;
2365}
2366
2367const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
2368                                         const SCEV *RHS) {
2369  // ~smax(~x, ~y) == smin(x, y).
2370  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2371}
2372
2373const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2374                                         const SCEV *RHS) {
2375  // ~umax(~x, ~y) == umin(x, y)
2376  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2377}
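// Both identities follow from ~z = -1 - z reversing the order of its operand:
// complementing the inputs and the result of a max yields the corresponding
// min. For example, smin(3, 5) = ~smax(~3, ~5) = ~smax(-4, -6) = ~(-4) = 3.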
2378
2379const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
2380  // If we have TargetData, we can bypass creating a target-independent
2381  // constant expression and then folding it back into a ConstantInt.
2382  // This is just a compile-time optimization.
2383  if (TD)
2384    return getConstant(TD->getIntPtrType(getContext()),
2385                       TD->getTypeAllocSize(AllocTy));
2386
2387  Constant *C = ConstantExpr::getSizeOf(AllocTy);
2388  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2389    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2390      C = Folded;
2391  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2392  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2393}
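// For example, on a target whose TargetData reports 8-byte doubles and 64-bit
// pointers (an assumed layout, purely for illustration), getSizeOfExpr(double)
// yields the i64 constant 8 without ever building a ConstantExpr.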
2394
2395const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
2396  Constant *C = ConstantExpr::getAlignOf(AllocTy);
2397  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2398    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2399      C = Folded;
2400  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2401  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2402}
2403
2404const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
2405                                             unsigned FieldNo) {
2406  // If we have TargetData, we can bypass creating a target-independent
2407  // constant expression and then folding it back into a ConstantInt.
2408  // This is just a compile-time optimization.
2409  if (TD)
2410    return getConstant(TD->getIntPtrType(getContext()),
2411                       TD->getStructLayout(STy)->getElementOffset(FieldNo));
2412
2413  Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
2414  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2415    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2416      C = Folded;
2417  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
2418  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2419}
2420
2421const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
2422                                             Constant *FieldNo) {
2423  Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
2424  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2425    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2426      C = Folded;
2427  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
2428  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2429}
2430
2431const SCEV *ScalarEvolution::getUnknown(Value *V) {
2432  // Don't attempt to do anything other than create a SCEVUnknown object
2433  // here.  createSCEV only calls getUnknown after checking for all other
2434  // interesting possibilities, and any other code that calls getUnknown
2435  // is doing so in order to hide a value from SCEV canonicalization.
2436
2437  FoldingSetNodeID ID;
2438  ID.AddInteger(scUnknown);
2439  ID.AddPointer(V);
2440  void *IP = 0;
2441  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
2442    assert(cast<SCEVUnknown>(S)->getValue() == V &&
2443           "Stale SCEVUnknown in uniquing map!");
2444    return S;
2445  }
2446  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
2447                                            FirstUnknown);
2448  FirstUnknown = cast<SCEVUnknown>(S);
2449  UniqueSCEVs.InsertNode(S, IP);
2450  return S;
2451}
2452
2453//===----------------------------------------------------------------------===//
2454//            Basic SCEV Analysis and PHI Idiom Recognition Code
2455//
2456
2457/// isSCEVable - Test if values of the given type are analyzable within
2458/// the SCEV framework. This primarily includes integer types, and it
2459/// can optionally include pointer types if the ScalarEvolution class
2460/// has access to target-specific information.
2461bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2462  // Integers and pointers are always SCEVable.
2463  return Ty->isIntegerTy() || Ty->isPointerTy();
2464}
2465
2466/// getTypeSizeInBits - Return the size in bits of the specified type,
2467/// for which isSCEVable must return true.
2468uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2469  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2470
2471  // If we have a TargetData, use it!
2472  if (TD)
2473    return TD->getTypeSizeInBits(Ty);
2474
2475  // Integer types have fixed sizes.
2476  if (Ty->isIntegerTy())
2477    return Ty->getPrimitiveSizeInBits();
2478
2479  // The only other supported type is pointer. Without TargetData, conservatively
2480  // assume pointers are 64-bit.
2481  assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
2482  return 64;
2483}
2484
2485/// getEffectiveSCEVType - Return a type with the same bitwidth as
2486/// the given type and which represents how SCEV will treat the given
2487/// type, for which isSCEVable must return true. For pointer types,
2488/// this is the pointer-sized integer type.
2489const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2490  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2491
2492  if (Ty->isIntegerTy())
2493    return Ty;
2494
2495  // The only other supported type is pointer.
2496  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
2497  if (TD) return TD->getIntPtrType(getContext());
2498
2499  // Without TargetData, conservatively assume pointers are 64-bit.
2500  return Type::getInt64Ty(getContext());
2501}
2502
2503const SCEV *ScalarEvolution::getCouldNotCompute() {
2504  return &CouldNotCompute;
2505}
2506
2507/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2508/// expression and create a new one.
2509const SCEV *ScalarEvolution::getSCEV(Value *V) {
2510  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2511
2512  ValueExprMapType::const_iterator I = ValueExprMap.find(V);
2513  if (I != ValueExprMap.end()) return I->second;
2514  const SCEV *S = createSCEV(V);
2515
2516  // The process of creating a SCEV for V may have caused other SCEVs
2517  // to have been created, so it's necessary to insert the new entry
2518  // from scratch, rather than trying to remember the insert position
2519  // above.
2520  ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2521  return S;
2522}
2523
2524/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2525///
2526const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2527  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2528    return getConstant(
2529               cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2530
2531  const Type *Ty = V->getType();
2532  Ty = getEffectiveSCEVType(Ty);
2533  return getMulExpr(V,
2534                  getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
2535}
2536
2537/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2538const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2539  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2540    return getConstant(
2541                cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2542
2543  const Type *Ty = V->getType();
2544  Ty = getEffectiveSCEVType(Ty);
2545  const SCEV *AllOnes =
2546                   getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
2547  return getMinusSCEV(AllOnes, V);
2548}
2549
2550/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2551///
2552const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2553                                          const SCEV *RHS) {
2554  // Fast path: X - X --> 0.
2555  if (LHS == RHS)
2556    return getConstant(LHS->getType(), 0);
2557
2558  // X - Y --> X + -Y
2559  return getAddExpr(LHS, getNegativeSCEV(RHS));
2560}
2561
2562/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2563/// input value to the specified type.  If the type must be extended, it is zero
2564/// extended.
2565const SCEV *
2566ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2567                                         const Type *Ty) {
2568  const Type *SrcTy = V->getType();
2569  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2570         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2571         "Cannot truncate or zero extend with non-integer arguments!");
2572  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2573    return V;  // No conversion
2574  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2575    return getTruncateExpr(V, Ty);
2576  return getZeroExtendExpr(V, Ty);
2577}
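// For example, an i16 value converted to i64 here becomes zext(V to i64), an
// i64 value converted to i16 becomes trunc(V to i16), and a conversion between
// types of equal size (including a pointer and its effective integer type)
// returns V unchanged.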
2578
2579/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2580/// input value to the specified type.  If the type must be extended, it is sign
2581/// extended.
2582const SCEV *
2583ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2584                                         const Type *Ty) {
2585  const Type *SrcTy = V->getType();
2586  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2587         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2588         "Cannot truncate or sign extend with non-integer arguments!");
2589  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2590    return V;  // No conversion
2591  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2592    return getTruncateExpr(V, Ty);
2593  return getSignExtendExpr(V, Ty);
2594}
2595
2596/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2597/// input value to the specified type.  If the type must be extended, it is zero
2598/// extended.  The conversion must not be narrowing.
2599const SCEV *
2600ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2601  const Type *SrcTy = V->getType();
2602  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2603         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2604         "Cannot noop or zero extend with non-integer arguments!");
2605  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2606         "getNoopOrZeroExtend cannot truncate!");
2607  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2608    return V;  // No conversion
2609  return getZeroExtendExpr(V, Ty);
2610}
2611
2612/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2613/// input value to the specified type.  If the type must be extended, it is sign
2614/// extended.  The conversion must not be narrowing.
2615const SCEV *
2616ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2617  const Type *SrcTy = V->getType();
2618  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2619         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2620         "Cannot noop or sign extend with non-integer arguments!");
2621  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2622         "getNoopOrSignExtend cannot truncate!");
2623  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2624    return V;  // No conversion
2625  return getSignExtendExpr(V, Ty);
2626}
2627
2628/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2629/// the input value to the specified type. If the type must be extended,
2630/// it is extended with unspecified bits. The conversion must not be
2631/// narrowing.
2632const SCEV *
2633ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2634  const Type *SrcTy = V->getType();
2635  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2636         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2637         "Cannot noop or any extend with non-integer arguments!");
2638  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2639         "getNoopOrAnyExtend cannot truncate!");
2640  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2641    return V;  // No conversion
2642  return getAnyExtendExpr(V, Ty);
2643}
2644
2645/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2646/// input value to the specified type.  The conversion must not be widening.
2647const SCEV *
2648ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2649  const Type *SrcTy = V->getType();
2650  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2651         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2652         "Cannot truncate or noop with non-integer arguments!");
2653  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2654         "getTruncateOrNoop cannot extend!");
2655  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2656    return V;  // No conversion
2657  return getTruncateExpr(V, Ty);
2658}
2659
2660/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2661/// the types using zero-extension, and then perform a umax operation
2662/// with them.
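/// For example, the umax of an i32 %a and an i64 %b is computed as
/// umax(zext(%a to i64), %b).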
2663const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2664                                                        const SCEV *RHS) {
2665  const SCEV *PromotedLHS = LHS;
2666  const SCEV *PromotedRHS = RHS;
2667
2668  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2669    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2670  else
2671    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2672
2673  return getUMaxExpr(PromotedLHS, PromotedRHS);
2674}
2675
2676/// getUMinFromMismatchedTypes - Promote the operands to the wider of
2677/// the types using zero-extension, and then perform a umin operation
2678/// with them.
2679const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2680                                                        const SCEV *RHS) {
2681  const SCEV *PromotedLHS = LHS;
2682  const SCEV *PromotedRHS = RHS;
2683
2684  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2685    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2686  else
2687    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2688
2689  return getUMinExpr(PromotedLHS, PromotedRHS);
2690}
2691
2692/// PushDefUseChildren - Push users of the given Instruction
2693/// onto the given Worklist.
2694static void
2695PushDefUseChildren(Instruction *I,
2696                   SmallVectorImpl<Instruction *> &Worklist) {
2697  // Push the def-use children onto the Worklist stack.
2698  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2699       UI != UE; ++UI)
2700    Worklist.push_back(cast<Instruction>(*UI));
2701}
2702
2703/// ForgetSymbolicName - This looks up computed SCEV values for all
2704/// instructions that depend on the given instruction and removes them from
2705/// the ValueExprMap if they reference SymName. This is used during PHI
2706/// resolution.
2707void
2708ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
2709  SmallVector<Instruction *, 16> Worklist;
2710  PushDefUseChildren(PN, Worklist);
2711
2712  SmallPtrSet<Instruction *, 8> Visited;
2713  Visited.insert(PN);
2714  while (!Worklist.empty()) {
2715    Instruction *I = Worklist.pop_back_val();
2716    if (!Visited.insert(I)) continue;
2717
2718    ValueExprMapType::iterator It =
2719      ValueExprMap.find(static_cast<Value *>(I));
2720    if (It != ValueExprMap.end()) {
2721      const SCEV *Old = It->second;
2722
2723      // Short-circuit the def-use traversal if the symbolic name
2724      // ceases to appear in expressions.
2725      if (Old != SymName && !Old->hasOperand(SymName))
2726        continue;
2727
2728      // SCEVUnknown for a PHI either means that it has an unrecognized
2729      // structure, it's a PHI that's in the process of being computed
2730      // by createNodeForPHI, or it's a single-value PHI. In the first case,
2731      // additional loop trip count information isn't going to change anything.
2732      // In the second case, createNodeForPHI will perform the necessary
2733      // updates on its own when it gets to that point. In the third, we do
2734      // want to forget the SCEVUnknown.
2735      if (!isa<PHINode>(I) ||
2736          !isa<SCEVUnknown>(Old) ||
2737          (I != PN && Old == SymName)) {
2738        ValuesAtScopes.erase(Old);
2739        UnsignedRanges.erase(Old);
2740        SignedRanges.erase(Old);
2741        ValueExprMap.erase(It);
2742      }
2743    }
2744
2745    PushDefUseChildren(I, Worklist);
2746  }
2747}
2748
2749/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2750/// a loop header, making it a potential recurrence, or it doesn't.
2751///
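/// For example, in the loop below the header PHI %i has the unique start
/// value 0 and the unique backedge value %i.next, so it is recognized as
/// the affine recurrence {0,+,1}<%loop>:
///
///   loop:
///     %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
///     %i.next = add i32 %i, 1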
2752const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2753  if (const Loop *L = LI->getLoopFor(PN->getParent()))
2754    if (L->getHeader() == PN->getParent()) {
2755      // The loop may have multiple entrances or multiple exits; we can analyze
2756      // this phi as an addrec if it has a unique entry value and a unique
2757      // backedge value.
2758      Value *BEValueV = 0, *StartValueV = 0;
2759      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
2760        Value *V = PN->getIncomingValue(i);
2761        if (L->contains(PN->getIncomingBlock(i))) {
2762          if (!BEValueV) {
2763            BEValueV = V;
2764          } else if (BEValueV != V) {
2765            BEValueV = 0;
2766            break;
2767          }
2768        } else if (!StartValueV) {
2769          StartValueV = V;
2770        } else if (StartValueV != V) {
2771          StartValueV = 0;
2772          break;
2773        }
2774      }
2775      if (BEValueV && StartValueV) {
2776        // While we are analyzing this PHI node, handle its value symbolically.
2777        const SCEV *SymbolicName = getUnknown(PN);
2778        assert(ValueExprMap.find(PN) == ValueExprMap.end() &&
2779               "PHI node already processed?");
2780        ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2781
2782        // Using this symbolic name for the PHI, analyze the value coming around
2783        // the back-edge.
2784        const SCEV *BEValue = getSCEV(BEValueV);
2785
2786        // NOTE: If BEValue is loop invariant, we know that the PHI node just
2787        // has a special value for the first iteration of the loop.
2788
2789        // If the value coming around the backedge is an add with the symbolic
2790        // value we just inserted, then we found a simple induction variable!
2791        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2792          // If there is a single occurrence of the symbolic value, replace it
2793          // with a recurrence.
2794          unsigned FoundIndex = Add->getNumOperands();
2795          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2796            if (Add->getOperand(i) == SymbolicName)
2797              if (FoundIndex == e) {
2798                FoundIndex = i;
2799                break;
2800              }
2801
2802          if (FoundIndex != Add->getNumOperands()) {
2803            // Create an add with everything but the specified operand.
2804            SmallVector<const SCEV *, 8> Ops;
2805            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2806              if (i != FoundIndex)
2807                Ops.push_back(Add->getOperand(i));
2808            const SCEV *Accum = getAddExpr(Ops);
2809
2810            // This is not a valid addrec if the step amount is varying each
2811            // loop iteration, but is not itself an addrec in this loop.
2812            if (Accum->isLoopInvariant(L) ||
2813                (isa<SCEVAddRecExpr>(Accum) &&
2814                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2815              bool HasNUW = false;
2816              bool HasNSW = false;
2817
2818              // If the increment doesn't overflow, then neither the addrec nor
2819              // the post-increment will overflow.
2820              if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
2821                if (OBO->hasNoUnsignedWrap())
2822                  HasNUW = true;
2823                if (OBO->hasNoSignedWrap())
2824                  HasNSW = true;
2825              }
2826
2827              const SCEV *StartVal = getSCEV(StartValueV);
2828              const SCEV *PHISCEV =
2829                getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);
2830
2831              // Since the no-wrap flags are on the increment, they apply to the
2832              // post-incremented value as well.
2833              if (Accum->isLoopInvariant(L))
2834                (void)getAddRecExpr(getAddExpr(StartVal, Accum),
2835                                    Accum, L, HasNUW, HasNSW);
2836
2837              // Okay, for the entire analysis of this edge we assumed the PHI
2838              // to be symbolic.  We now need to go back and purge all of the
2839              // entries for the scalars that use the symbolic expression.
2840              ForgetSymbolicName(PN, SymbolicName);
2841              ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
2842              return PHISCEV;
2843            }
2844          }
2845        } else if (const SCEVAddRecExpr *AddRec =
2846                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
2847          // Otherwise, this could be a loop like this:
2848          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2849          // In this case, j = {1,+,1}  and BEValue is j.
2850          // Because the other in-value of i (0) fits the evolution of BEValue
2851          // i really is an addrec evolution.
2852          if (AddRec->getLoop() == L && AddRec->isAffine()) {
2853            const SCEV *StartVal = getSCEV(StartValueV);
2854
2855            // If StartVal = j.start - j.stride, we can use StartVal as the
2856            // initial value of the addrec evolution.
2857            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2858                                         AddRec->getOperand(1))) {
2859              const SCEV *PHISCEV =
2860                 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2861
2862              // Okay, for the entire analysis of this edge we assumed the PHI
2863              // to be symbolic.  We now need to go back and purge all of the
2864              // entries for the scalars that use the symbolic expression.
2865              ForgetSymbolicName(PN, SymbolicName);
2866              ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
2867              return PHISCEV;
2868            }
2869          }
2870        }
2871      }
2872    }
2873
2874  // If the PHI has a single incoming value, follow that value, unless the
2875  // PHI's incoming blocks are in a different loop, in which case doing so
2876  // risks breaking LCSSA form. Instcombine would normally zap these, but
2877  // it doesn't have DominatorTree information, so it may miss cases.
2878  if (Value *V = SimplifyInstruction(PN, TD, DT)) {
2879    // TODO: The following check is suboptimal.  For example, it is pointless
2880    // if V is a constant.  Since the problematic case is if V is defined inside
2881    // a deeper loop, it would be better to check for that directly.
2882    bool AllSameLoop = true;
2883    Loop *PNLoop = LI->getLoopFor(PN->getParent());
2884    for (size_t i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
2885      if (LI->getLoopFor(PN->getIncomingBlock(i)) != PNLoop) {
2886        AllSameLoop = false;
2887        break;
2888      }
2889    if (AllSameLoop)
2890      return getSCEV(V);
2891  }
2892
2893  // If it's not a loop phi, we can't handle it yet.
2894  return getUnknown(PN);
2895}
2896
2897/// createNodeForGEP - Expand GEP instructions into add and multiply
2898/// operations. This allows them to be analyzed by regular SCEV code.
2899///
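/// For example, on a target where i32 is 4 bytes, the address computed by
/// "getelementptr [10 x i32]* %A, i64 0, i64 %i" is modeled as the SCEV
/// (%A + 4 * %i), with the index %i interpreted as signed.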
2900const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
2901
2902  // Don't blindly transfer the inbounds flag from the GEP instruction to the
2903  // Add expression, because the Instruction may be guarded by control flow
2904  // and the no-overflow bits may not be valid for the expression in any
2905  // context.
2906
2907  const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
2908  Value *Base = GEP->getOperand(0);
2909  // Don't attempt to analyze GEPs over unsized objects.
2910  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2911    return getUnknown(GEP);
2912  const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
2913  gep_type_iterator GTI = gep_type_begin(GEP);
2914  for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
2915                                      E = GEP->op_end();
2916       I != E; ++I) {
2917    Value *Index = *I;
2918    // Compute the (potentially symbolic) offset in bytes for this index.
2919    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2920      // For a struct, add the member offset.
2921      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2922      const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
2923
2924      // Add the field offset to the running total offset.
2925      TotalOffset = getAddExpr(TotalOffset, FieldOffset);
2926    } else {
2927      // For an array, add the element offset, explicitly scaled.
2928      const SCEV *ElementSize = getSizeOfExpr(*GTI);
2929      const SCEV *IndexS = getSCEV(Index);
2930      // Getelementptr indices are signed.
2931      IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
2932
2933      // Multiply the index by the element size to compute the element offset.
2934      const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize);
2935
2936      // Add the element offset to the running total offset.
2937      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2938    }
2939  }
2940
2941  // Get the SCEV for the GEP base.
2942  const SCEV *BaseS = getSCEV(Base);
2943
2944  // Add the total offset from all the GEP indices to the base.
2945  return getAddExpr(BaseS, TotalOffset);
2946}
2947
2948/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2949/// guaranteed to end in (at every loop iteration).  It is, at the same time,
2950/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2951/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
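/// For {4,+,8}: the start value 4 ends in two zero bits and each step adds
/// a multiple of 8, so every value of the recurrence keeps at least two
/// trailing zero bits, i.e. remains divisible by 4.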
2952uint32_t
2953ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2954  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2955    return C->getValue()->getValue().countTrailingZeros();
2956
2957  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2958    return std::min(GetMinTrailingZeros(T->getOperand()),
2959                    (uint32_t)getTypeSizeInBits(T->getType()));
2960
2961  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2962    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2963    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2964             getTypeSizeInBits(E->getType()) : OpRes;
2965  }
2966
2967  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2968    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2969    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2970             getTypeSizeInBits(E->getType()) : OpRes;
2971  }
2972
2973  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2974    // The result is the min of all operands results.
2975    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2976    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2977      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2978    return MinOpRes;
2979  }
2980
2981  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2982    // The result is the sum of all operands results.
2983    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2984    uint32_t BitWidth = getTypeSizeInBits(M->getType());
2985    for (unsigned i = 1, e = M->getNumOperands();
2986         SumOpRes != BitWidth && i != e; ++i)
2987      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2988                          BitWidth);
2989    return SumOpRes;
2990  }
2991
2992  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2993    // The result is the min of all operands results.
2994    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2995    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2996      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2997    return MinOpRes;
2998  }
2999
3000  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
3001    // The result is the min of all operands results.
3002    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
3003    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
3004      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
3005    return MinOpRes;
3006  }
3007
3008  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
3009    // The result is the min of all operands results.
3010    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
3011    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
3012      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
3013    return MinOpRes;
3014  }
3015
3016  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3017    // For a SCEVUnknown, ask ValueTracking.
3018    unsigned BitWidth = getTypeSizeInBits(U->getType());
3019    APInt Mask = APInt::getAllOnesValue(BitWidth);
3020    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
3021    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
3022    return Zeros.countTrailingOnes();
3023  }
3024
3025  // SCEVUDivExpr
3026  return 0;
3027}
3028
3029/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
3030///
3031ConstantRange
3032ScalarEvolution::getUnsignedRange(const SCEV *S) {
3033  // See if we've computed this range already.
3034  DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
3035  if (I != UnsignedRanges.end())
3036    return I->second;
3037
3038  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3039    return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));
3040
3041  unsigned BitWidth = getTypeSizeInBits(S->getType());
3042  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
3043
3044  // If the value has known zeros, the maximum unsigned value will have those
3045  // known zeros as well.
3046  uint32_t TZ = GetMinTrailingZeros(S);
3047  if (TZ != 0)
3048    ConservativeResult =
3049      ConstantRange(APInt::getMinValue(BitWidth),
3050                    APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
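  // For example, with TZ == 2 every attainable value has its low two bits
  // clear, so the largest possible value is 0b11...100 and the range above
  // is the half-open interval [0, 0b11...100 + 1).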
3051
3052  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3053    ConstantRange X = getUnsignedRange(Add->getOperand(0));
3054    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
3055      X = X.add(getUnsignedRange(Add->getOperand(i)));
3056    return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
3057  }
3058
3059  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3060    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
3061    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3062      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
3063    return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
3064  }
3065
3066  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3067    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
3068    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3069      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
3070    return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
3071  }
3072
3073  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3074    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
3075    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3076      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
3077    return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
3078  }
3079
3080  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3081    ConstantRange X = getUnsignedRange(UDiv->getLHS());
3082    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
3083    return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
3084  }
3085
3086  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3087    ConstantRange X = getUnsignedRange(ZExt->getOperand());
3088    return setUnsignedRange(ZExt,
3089      ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
3090  }
3091
3092  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3093    ConstantRange X = getUnsignedRange(SExt->getOperand());
3094    return setUnsignedRange(SExt,
3095      ConservativeResult.intersectWith(X.signExtend(BitWidth)));
3096  }
3097
3098  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3099    ConstantRange X = getUnsignedRange(Trunc->getOperand());
3100    return setUnsignedRange(Trunc,
3101      ConservativeResult.intersectWith(X.truncate(BitWidth)));
3102  }
3103
3104  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3105    // If there's no unsigned wrap, the value will never be less than its
3106    // initial value.
3107    if (AddRec->hasNoUnsignedWrap())
3108      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
3109        if (!C->getValue()->isZero())
3110          ConservativeResult =
3111            ConservativeResult.intersectWith(
3112              ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
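    // (A ConstantRange whose upper bound is 0 denotes the wrapped set
    // [Start, 2^BitWidth), i.e. all values unsigned-greater-or-equal the
    // start constant.)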
3113
3114    // TODO: non-affine addrec
3115    if (AddRec->isAffine()) {
3116      const Type *Ty = AddRec->getType();
3117      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3118      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3119          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3120        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3121
3122        const SCEV *Start = AddRec->getStart();
3123        const SCEV *Step = AddRec->getStepRecurrence(*this);
3124
3125        ConstantRange StartRange = getUnsignedRange(Start);
3126        ConstantRange StepRange = getSignedRange(Step);
3127        ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3128        ConstantRange EndRange =
3129          StartRange.add(MaxBECountRange.multiply(StepRange));
3130
3131        // Check for overflow. This must be done with ConstantRange arithmetic
3132        // because we could be called from within the ScalarEvolution overflow
3133        // checking code.
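        // Widening to 2*BitWidth+1 bits is lossless: the product of two
        // BitWidth-bit quantities fits in 2*BitWidth bits and the final add
        // can carry into at most one more bit, so any mismatch with
        // EndRange can only come from wrapping at the original width.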
3134        ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
3135        ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3136        ConstantRange ExtMaxBECountRange =
3137          MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3138        ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
3139        if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3140            ExtEndRange)
3141          return setUnsignedRange(AddRec, ConservativeResult);
3142
3143        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
3144                                   EndRange.getUnsignedMin());
3145        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
3146                                   EndRange.getUnsignedMax());
3147        if (Min.isMinValue() && Max.isMaxValue())
3148          return setUnsignedRange(AddRec, ConservativeResult);
3149        return setUnsignedRange(AddRec,
3150          ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
3151      }
3152    }
3153
3154    return setUnsignedRange(AddRec, ConservativeResult);
3155  }
3156
3157  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3158    // For a SCEVUnknown, ask ValueTracking.
3159    APInt Mask = APInt::getAllOnesValue(BitWidth);
3160    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
3161    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
3162    if (Ones == ~Zeros + 1)
3163      return setUnsignedRange(U, ConservativeResult);
3164    return setUnsignedRange(U,
3165      ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)));
3166  }
3167
3168  return setUnsignedRange(S, ConservativeResult);
3169}
3170
3171/// getSignedRange - Determine the signed range for a particular SCEV.
3172///
3173ConstantRange
3174ScalarEvolution::getSignedRange(const SCEV *S) {
3175  DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
3176  if (I != SignedRanges.end())
3177    return I->second;
3178
3179  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3180    return setSignedRange(C, ConstantRange(C->getValue()->getValue()));
3181
3182  unsigned BitWidth = getTypeSizeInBits(S->getType());
3183  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
3184
3185  // If the value has known zeros, the maximum signed value will have those
3186  // known zeros as well.
3187  uint32_t TZ = GetMinTrailingZeros(S);
3188  if (TZ != 0)
3189    ConservativeResult =
3190      ConstantRange(APInt::getSignedMinValue(BitWidth),
3191                    APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
3192
3193  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3194    ConstantRange X = getSignedRange(Add->getOperand(0));
3195    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
3196      X = X.add(getSignedRange(Add->getOperand(i)));
3197    return setSignedRange(Add, ConservativeResult.intersectWith(X));
3198  }
3199
3200  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3201    ConstantRange X = getSignedRange(Mul->getOperand(0));
3202    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3203      X = X.multiply(getSignedRange(Mul->getOperand(i)));
3204    return setSignedRange(Mul, ConservativeResult.intersectWith(X));
3205  }
3206
3207  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3208    ConstantRange X = getSignedRange(SMax->getOperand(0));
3209    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3210      X = X.smax(getSignedRange(SMax->getOperand(i)));
3211    return setSignedRange(SMax, ConservativeResult.intersectWith(X));
3212  }
3213
3214  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3215    ConstantRange X = getSignedRange(UMax->getOperand(0));
3216    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3217      X = X.umax(getSignedRange(UMax->getOperand(i)));
3218    return setSignedRange(UMax, ConservativeResult.intersectWith(X));
3219  }
3220
3221  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3222    ConstantRange X = getSignedRange(UDiv->getLHS());
3223    ConstantRange Y = getSignedRange(UDiv->getRHS());
3224    return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
3225  }
3226
3227  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3228    ConstantRange X = getSignedRange(ZExt->getOperand());
3229    return setSignedRange(ZExt,
3230      ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
3231  }
3232
3233  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3234    ConstantRange X = getSignedRange(SExt->getOperand());
3235    return setSignedRange(SExt,
3236      ConservativeResult.intersectWith(X.signExtend(BitWidth)));
3237  }
3238
3239  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3240    ConstantRange X = getSignedRange(Trunc->getOperand());
3241    return setSignedRange(Trunc,
3242      ConservativeResult.intersectWith(X.truncate(BitWidth)));
3243  }
3244
3245  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3246    // If there's no signed wrap, and all the operands have the same sign or
3247    // zero, the value won't ever change sign.
3248    if (AddRec->hasNoSignedWrap()) {
3249      bool AllNonNeg = true;
3250      bool AllNonPos = true;
3251      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
3252        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
3253        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
3254      }
3255      if (AllNonNeg)
3256        ConservativeResult = ConservativeResult.intersectWith(
3257          ConstantRange(APInt(BitWidth, 0),
3258                        APInt::getSignedMinValue(BitWidth)));
3259      else if (AllNonPos)
3260        ConservativeResult = ConservativeResult.intersectWith(
3261          ConstantRange(APInt::getSignedMinValue(BitWidth),
3262                        APInt(BitWidth, 1)));
3263    }
3264
3265    // TODO: non-affine addrec
3266    if (AddRec->isAffine()) {
3267      const Type *Ty = AddRec->getType();
3268      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3269      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3270          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3271        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3272
3273        const SCEV *Start = AddRec->getStart();
3274        const SCEV *Step = AddRec->getStepRecurrence(*this);
3275
3276        ConstantRange StartRange = getSignedRange(Start);
3277        ConstantRange StepRange = getSignedRange(Step);
3278        ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3279        ConstantRange EndRange =
3280          StartRange.add(MaxBECountRange.multiply(StepRange));
3281
3282        // Check for overflow. This must be done with ConstantRange arithmetic
3283        // because we could be called from within the ScalarEvolution overflow
3284        // checking code.
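        // As in getUnsignedRange, 2*BitWidth+1 bits are enough to make the
        // extended arithmetic lossless.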
3285        ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
3286        ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3287        ConstantRange ExtMaxBECountRange =
3288          MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3289        ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
3290        if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3291            ExtEndRange)
3292          return setSignedRange(AddRec, ConservativeResult);
3293
3294        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
3295                                   EndRange.getSignedMin());
3296        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
3297                                   EndRange.getSignedMax());
3298        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
3299          return setSignedRange(AddRec, ConservativeResult);
3300        return setSignedRange(AddRec,
3301          ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
3302      }
3303    }
3304
3305    return setSignedRange(AddRec, ConservativeResult);
3306  }
3307
3308  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3309    // For a SCEVUnknown, ask ValueTracking.
3310    if (!U->getValue()->getType()->isIntegerTy() && !TD)
3311      return setSignedRange(U, ConservativeResult);
3312    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
3313    if (NS == 1)
3314      return setSignedRange(U, ConservativeResult);
3315    return setSignedRange(U, ConservativeResult.intersectWith(
3316      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
3317                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
3318  }
3319
3320  return setSignedRange(S, ConservativeResult);
3321}
3322
3323/// createSCEV - We know that there is no SCEV for the specified value.
3324/// Analyze the expression.
3325///
3326const SCEV *ScalarEvolution::createSCEV(Value *V) {
3327  if (!isSCEVable(V->getType()))
3328    return getUnknown(V);
3329
3330  unsigned Opcode = Instruction::UserOp1;
3331  if (Instruction *I = dyn_cast<Instruction>(V)) {
3332    Opcode = I->getOpcode();
3333
3334    // Don't attempt to analyze instructions in blocks that aren't
3335    // reachable. Such instructions don't matter, and they aren't required
3336    // to obey basic rules for definitions dominating uses which this
3337    // analysis depends on.
3338    if (!DT->isReachableFromEntry(I->getParent()))
3339      return getUnknown(V);
3340  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
3341    Opcode = CE->getOpcode();
3342  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
3343    return getConstant(CI);
3344  else if (isa<ConstantPointerNull>(V))
3345    return getConstant(V->getType(), 0);
3346  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
3347    return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
3348  else
3349    return getUnknown(V);
3350
3351  Operator *U = cast<Operator>(V);
3352  switch (Opcode) {
3353  case Instruction::Add: {
3354    // The simple thing to do would be to just call getSCEV on both operands
3355    // and call getAddExpr with the result. However if we're looking at a
3356    // bunch of things all added together, this can be quite inefficient,
3357    // because it leads to N-1 getAddExpr calls for N ultimate operands.
3358    // Instead, gather up all the operands and make a single getAddExpr call.
3359    // LLVM IR canonical form means we need only traverse the left operands.
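    // For example, for (((a + b) - c) + d) this loop gathers {d, -c, b, a}
    // and makes a single getAddExpr call instead of three nested ones.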
3360    SmallVector<const SCEV *, 4> AddOps;
3361    AddOps.push_back(getSCEV(U->getOperand(1)));
3362    for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
3363      unsigned Opcode = Op->getValueID() - Value::InstructionVal;
3364      if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
3365        break;
3366      U = cast<Operator>(Op);
3367      const SCEV *Op1 = getSCEV(U->getOperand(1));
3368      if (Opcode == Instruction::Sub)
3369        AddOps.push_back(getNegativeSCEV(Op1));
3370      else
3371        AddOps.push_back(Op1);
3372    }
3373    AddOps.push_back(getSCEV(U->getOperand(0)));
3374    return getAddExpr(AddOps);
3375  }
3376  case Instruction::Mul: {
3377    // See the Add code above.
3378    SmallVector<const SCEV *, 4> MulOps;
3379    MulOps.push_back(getSCEV(U->getOperand(1)));
3380    for (Value *Op = U->getOperand(0);
3381         Op->getValueID() == Instruction::Mul + Value::InstructionVal;
3382         Op = U->getOperand(0)) {
3383      U = cast<Operator>(Op);
3384      MulOps.push_back(getSCEV(U->getOperand(1)));
3385    }
3386    MulOps.push_back(getSCEV(U->getOperand(0)));
3387    return getMulExpr(MulOps);
3388  }
3389  case Instruction::UDiv:
3390    return getUDivExpr(getSCEV(U->getOperand(0)),
3391                       getSCEV(U->getOperand(1)));
3392  case Instruction::Sub:
3393    return getMinusSCEV(getSCEV(U->getOperand(0)),
3394                        getSCEV(U->getOperand(1)));
3395  case Instruction::And:
3396    // For an expression like x&255 that merely masks off the high bits,
3397    // use zext(trunc(x)) as the SCEV expression.
3398    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3399      if (CI->isNullValue())
3400        return getSCEV(U->getOperand(1));
3401      if (CI->isAllOnesValue())
3402        return getSCEV(U->getOperand(0));
3403      const APInt &A = CI->getValue();
3404
3405      // Instcombine's ShrinkDemandedConstant may strip bits out of
3406      // constants, obscuring what would otherwise be a low-bits mask.
3407      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
3408      // knew about to reconstruct a low-bits mask value.
3409      unsigned LZ = A.countLeadingZeros();
3410      unsigned BitWidth = A.getBitWidth();
3411      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
3412      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3413      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
3414
3415      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
3416
3417      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
3418        return
3419          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
3420                                IntegerType::get(getContext(), BitWidth - LZ)),
3421                            U->getType());
3422    }
3423    break;
3424
3425  case Instruction::Or:
3426    // If the RHS of the Or is a constant, we may have something like:
3427    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
3428    // optimizations will transparently handle this case.
3429    //
3430    // In order for this transformation to be safe, the LHS must be of the
3431    // form X*(2^n) and the Or constant must be less than 2^n.
3432    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3433      const SCEV *LHS = getSCEV(U->getOperand(0));
3434      const APInt &CIVal = CI->getValue();
3435      if (GetMinTrailingZeros(LHS) >=
3436          (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
3437        // Build a plain add SCEV.
3438        const SCEV *S = getAddExpr(LHS, getSCEV(CI));
3439        // If the LHS of the add was an addrec and it has no-wrap flags,
3440        // transfer the no-wrap flags, since an or won't introduce a wrap.
3441        if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
3442          const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
3443          if (OldAR->hasNoUnsignedWrap())
3444            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true);
3445          if (OldAR->hasNoSignedWrap())
3446            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true);
3447        }
3448        return S;
3449      }
3450    }
3451    break;
3452  case Instruction::Xor:
3453    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3454      // If the RHS of the xor is a signbit, then this is just an add.
3455      // Instcombine turns add of signbit into xor as a strength reduction step.
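      // (x ^ SignBit equals x + SignBit: the addition flips only the top
      // bit, and any carry out of it is discarded.)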
3456      if (CI->getValue().isSignBit())
3457        return getAddExpr(getSCEV(U->getOperand(0)),
3458                          getSCEV(U->getOperand(1)));
3459
3460      // If the RHS of xor is -1, then this is a not operation.
3461      if (CI->isAllOnesValue())
3462        return getNotSCEV(getSCEV(U->getOperand(0)));
3463
3464      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
3465      // This is a variant of the check for xor with -1, and it handles
3466      // the case where instcombine has trimmed non-demanded bits out
3467      // of an xor with -1.
3468      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
3469        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
3470          if (BO->getOpcode() == Instruction::And &&
3471              LCI->getValue() == CI->getValue())
3472            if (const SCEVZeroExtendExpr *Z =
3473                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
3474              const Type *UTy = U->getType();
3475              const SCEV *Z0 = Z->getOperand();
3476              const Type *Z0Ty = Z0->getType();
3477              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
3478
3479              // If C is a low-bits mask, the zero extend is serving to
3480              // mask off the high bits. Complement the operand and
3481              // re-apply the zext.
3482              if (APIntOps::isMask(Z0TySize, CI->getValue()))
3483                return getZeroExtendExpr(getNotSCEV(Z0), UTy);
3484
3485              // If C is a single bit, it may be in the sign-bit position
3486              // before the zero-extend. In this case, represent the xor
3487              // using an add, which is equivalent, and re-apply the zext.
3488              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
3489              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3490                  Trunc.isSignBit())
3491                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3492                                         UTy);
3493            }
3494    }
3495    break;
3496
3497  case Instruction::Shl:
3498    // Turn shift left of a constant amount into a multiply.
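    // For example, "shl %x, 3" becomes the SCEV (8 * %x).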
3499    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3500      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3501
3502      // If the shift count is not less than the bitwidth, the result of
3503      // the shift is undefined. Don't try to analyze it, because the
3504      // resolution chosen here may differ from the resolution chosen in
3505      // other parts of the compiler.
3506      if (SA->getValue().uge(BitWidth))
3507        break;
3508
3509      Constant *X = ConstantInt::get(getContext(),
3510        APInt(BitWidth, 1).shl(SA->getZExtValue()));
3511      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3512    }
3513    break;
3514
3515  case Instruction::LShr:
3516    // Turn logical shift right of a constant amount into an unsigned divide.
3517    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3518      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3519
3520      // If the shift count is not less than the bitwidth, the result of
3521      // the shift is undefined. Don't try to analyze it, because the
3522      // resolution chosen here may differ from the resolution chosen in
3523      // other parts of the compiler.
3524      if (SA->getValue().uge(BitWidth))
3525        break;
3526
3527      Constant *X = ConstantInt::get(getContext(),
3528        APInt(BitWidth, 1).shl(SA->getZExtValue()));
3529      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3530    }
3531    break;
3532
3533  case Instruction::AShr:
3534    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
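    // For example, on i32 "(x shl 24) ashr 24" sign-extends the low 8 bits
    // and is modeled as sext(trunc(x to i8) to i32).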
3535    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3536      if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
3537        if (L->getOpcode() == Instruction::Shl &&
3538            L->getOperand(1) == U->getOperand(1)) {
3539          uint64_t BitWidth = getTypeSizeInBits(U->getType());
3540
3541          // If the shift count is not less than the bitwidth, the result of
3542          // the shift is undefined. Don't try to analyze it, because the
3543          // resolution chosen here may differ from the resolution chosen in
3544          // other parts of the compiler.
3545          if (CI->getValue().uge(BitWidth))
3546            break;
3547
3548          uint64_t Amt = BitWidth - CI->getZExtValue();
3549          if (Amt == BitWidth)
3550            return getSCEV(L->getOperand(0));       // shift by zero --> noop
3551          return
3552            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3553                                              IntegerType::get(getContext(),
3554                                                               Amt)),
3555                              U->getType());
3556        }
3557    break;
3558
3559  case Instruction::Trunc:
3560    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
3561
3562  case Instruction::ZExt:
3563    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3564
3565  case Instruction::SExt:
3566    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3567
3568  case Instruction::BitCast:
3569    // BitCasts are no-op casts so we just eliminate the cast.
3570    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
3571      return getSCEV(U->getOperand(0));
3572    break;
3573
3574  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
3575  // lead to pointer expressions which cannot safely be expanded to GEPs,
3576  // because ScalarEvolution doesn't respect the GEP aliasing rules when
3577  // simplifying integer expressions.
3578
3579  case Instruction::GetElementPtr:
3580    return createNodeForGEP(cast<GEPOperator>(U));
3581
3582  case Instruction::PHI:
3583    return createNodeForPHI(cast<PHINode>(U));
3584
3585  case Instruction::Select:
3586    // This could be a smax or umax that was lowered earlier.
3587    // Try to recover it.
3588    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
3589      Value *LHS = ICI->getOperand(0);
3590      Value *RHS = ICI->getOperand(1);
3591      switch (ICI->getPredicate()) {
3592      case ICmpInst::ICMP_SLT:
3593      case ICmpInst::ICMP_SLE:
3594        std::swap(LHS, RHS);
3595        // fall through
3596      case ICmpInst::ICMP_SGT:
3597      case ICmpInst::ICMP_SGE:
3598        // a >s b ? a+x : b+x  ->  smax(a, b)+x
3599        // a >s b ? b+x : a+x  ->  smin(a, b)+x
3600        if (LHS->getType() == U->getType()) {
3601          const SCEV *LS = getSCEV(LHS);
3602          const SCEV *RS = getSCEV(RHS);
3603          const SCEV *LA = getSCEV(U->getOperand(1));
3604          const SCEV *RA = getSCEV(U->getOperand(2));
3605          const SCEV *LDiff = getMinusSCEV(LA, LS);
3606          const SCEV *RDiff = getMinusSCEV(RA, RS);
3607          if (LDiff == RDiff)
3608            return getAddExpr(getSMaxExpr(LS, RS), LDiff);
3609          LDiff = getMinusSCEV(LA, RS);
3610          RDiff = getMinusSCEV(RA, LS);
3611          if (LDiff == RDiff)
3612            return getAddExpr(getSMinExpr(LS, RS), LDiff);
3613        }
3614        break;
3615      case ICmpInst::ICMP_ULT:
3616      case ICmpInst::ICMP_ULE:
3617        std::swap(LHS, RHS);
3618        // fall through
3619      case ICmpInst::ICMP_UGT:
3620      case ICmpInst::ICMP_UGE:
3621        // a >u b ? a+x : b+x  ->  umax(a, b)+x
3622        // a >u b ? b+x : a+x  ->  umin(a, b)+x
3623        if (LHS->getType() == U->getType()) {
3624          const SCEV *LS = getSCEV(LHS);
3625          const SCEV *RS = getSCEV(RHS);
3626          const SCEV *LA = getSCEV(U->getOperand(1));
3627          const SCEV *RA = getSCEV(U->getOperand(2));
3628          const SCEV *LDiff = getMinusSCEV(LA, LS);
3629          const SCEV *RDiff = getMinusSCEV(RA, RS);
3630          if (LDiff == RDiff)
3631            return getAddExpr(getUMaxExpr(LS, RS), LDiff);
3632          LDiff = getMinusSCEV(LA, RS);
3633          RDiff = getMinusSCEV(RA, LS);
3634          if (LDiff == RDiff)
3635            return getAddExpr(getUMinExpr(LS, RS), LDiff);
3636        }
3637        break;
3638      case ICmpInst::ICMP_NE:
3639        // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
3640        if (LHS->getType() == U->getType() &&
3641            isa<ConstantInt>(RHS) &&
3642            cast<ConstantInt>(RHS)->isZero()) {
3643          const SCEV *One = getConstant(LHS->getType(), 1);
3644          const SCEV *LS = getSCEV(LHS);
3645          const SCEV *LA = getSCEV(U->getOperand(1));
3646          const SCEV *RA = getSCEV(U->getOperand(2));
3647          const SCEV *LDiff = getMinusSCEV(LA, LS);
3648          const SCEV *RDiff = getMinusSCEV(RA, One);
3649          if (LDiff == RDiff)
3650            return getAddExpr(getUMaxExpr(One, LS), LDiff);
3651        }
3652        break;
3653      case ICmpInst::ICMP_EQ:
3654        // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
3655        if (LHS->getType() == U->getType() &&
3656            isa<ConstantInt>(RHS) &&
3657            cast<ConstantInt>(RHS)->isZero()) {
3658          const SCEV *One = getConstant(LHS->getType(), 1);
3659          const SCEV *LS = getSCEV(LHS);
3660          const SCEV *LA = getSCEV(U->getOperand(1));
3661          const SCEV *RA = getSCEV(U->getOperand(2));
3662          const SCEV *LDiff = getMinusSCEV(LA, One);
3663          const SCEV *RDiff = getMinusSCEV(RA, LS);
3664          if (LDiff == RDiff)
3665            return getAddExpr(getUMaxExpr(One, LS), LDiff);
3666        }
3667        break;
3668      default:
3669        break;
3670      }
3671    }
3672
3673  default: // We cannot analyze this expression.
3674    break;
3675  }
3676
3677  return getUnknown(V);
3678}
3679
3680
3681
3682//===----------------------------------------------------------------------===//
3683//                   Iteration Count Computation Code
3684//
3685
3686/// getBackedgeTakenCount - If the specified loop has a predictable
3687/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3688/// object. The backedge-taken count is the number of times the loop header
3689/// will be branched to from within the loop. This is one less than the
3690/// trip count of the loop, since it doesn't count the first iteration,
3691/// when the header is branched to from outside the loop.
3692///
3693/// Note that it is not valid to call this method on a loop without a
3694/// loop-invariant backedge-taken count (see
3695/// hasLoopInvariantBackedgeTakenCount).
3696///
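/// For example, a loop of the form "for (i = 0; i != n; ++i)" that executes
/// n iterations has a trip count of n and a backedge-taken count of n-1.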
3697const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3698  return getBackedgeTakenInfo(L).Exact;
3699}
3700
3701/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3702/// return the least SCEV value that is known never to be less than the
3703/// actual backedge taken count.
3704const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3705  return getBackedgeTakenInfo(L).Max;
3706}
3707
3708/// PushLoopPHIs - Push PHI nodes in the header of the given loop
3709/// onto the given Worklist.
3710static void
3711PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3712  BasicBlock *Header = L->getHeader();
3713
3714  // Push all Loop-header PHIs onto the Worklist stack.
3715  for (BasicBlock::iterator I = Header->begin();
3716       PHINode *PN = dyn_cast<PHINode>(I); ++I)
3717    Worklist.push_back(PN);
3718}
3719
3720const ScalarEvolution::BackedgeTakenInfo &
3721ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3722  // Initially insert a CouldNotCompute for this loop. If the insertion
3723  // succeeds, proceed to actually compute a backedge-taken count and
3724  // update the value. The temporary CouldNotCompute value tells SCEV
3725  // code elsewhere that it shouldn't attempt to request a new
3726  // backedge-taken count, which could result in infinite recursion.
3727  std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
3728    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3729  if (Pair.second) {
3730    BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
3731    if (BECount.Exact != getCouldNotCompute()) {
3732      assert(BECount.Exact->isLoopInvariant(L) &&
3733             BECount.Max->isLoopInvariant(L) &&
3734             "Computed backedge-taken count isn't loop invariant for loop!");
3735      ++NumTripCountsComputed;
3736
3737      // Update the value in the map.
3738      Pair.first->second = BECount;
3739    } else {
3740      if (BECount.Max != getCouldNotCompute())
3741        // Update the value in the map.
3742        Pair.first->second = BECount;
3743      if (isa<PHINode>(L->getHeader()->begin()))
3744        // Only count loops that have phi nodes as not being computable.
3745        ++NumTripCountsNotComputed;
3746    }
3747
3748    // Now that we know more about the trip count for this loop, forget any
3749    // existing SCEV values for PHI nodes in this loop since they are only
3750    // conservative estimates made without the benefit of trip count
3751    // information. This is similar to the code in forgetLoop, except that
3752    // it handles SCEVUnknown PHI nodes specially.
3753    if (BECount.hasAnyInfo()) {
3754      SmallVector<Instruction *, 16> Worklist;
3755      PushLoopPHIs(L, Worklist);
3756
3757      SmallPtrSet<Instruction *, 8> Visited;
3758      while (!Worklist.empty()) {
3759        Instruction *I = Worklist.pop_back_val();
3760        if (!Visited.insert(I)) continue;
3761
3762        ValueExprMapType::iterator It =
3763          ValueExprMap.find(static_cast<Value *>(I));
3764        if (It != ValueExprMap.end()) {
3765          const SCEV *Old = It->second;
3766
3767          // SCEVUnknown for a PHI either means that it has an unrecognized
3768          // structure, or it's a PHI that's in the process of being computed
3769          // by createNodeForPHI.  In the former case, additional loop trip
3770          // count information isn't going to change anything. In the latter
3771          // case, createNodeForPHI will perform the necessary updates on its
3772          // own when it gets to that point.
3773          if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
3774            ValuesAtScopes.erase(Old);
3775            UnsignedRanges.erase(Old);
3776            SignedRanges.erase(Old);
3777            ValueExprMap.erase(It);
3778          }
3779          if (PHINode *PN = dyn_cast<PHINode>(I))
3780            ConstantEvolutionLoopExitValue.erase(PN);
3781        }
3782
3783        PushDefUseChildren(I, Worklist);
3784      }
3785    }
3786  }
3787  return Pair.first->second;
3788}
3789
3790/// forgetLoop - This method should be called by the client when it has
3791/// changed a loop in a way that may affect ScalarEvolution's ability
3792/// compute a trip count, or if the loop is deleted.
3793void ScalarEvolution::forgetLoop(const Loop *L) {
3794  // Drop any stored trip count value.
3795  BackedgeTakenCounts.erase(L);
3796
3797  // Drop information about expressions based on loop-header PHIs.
3798  SmallVector<Instruction *, 16> Worklist;
3799  PushLoopPHIs(L, Worklist);
3800
3801  SmallPtrSet<Instruction *, 8> Visited;
3802  while (!Worklist.empty()) {
3803    Instruction *I = Worklist.pop_back_val();
3804    if (!Visited.insert(I)) continue;
3805
3806    ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
3807    if (It != ValueExprMap.end()) {
3808      const SCEV *Old = It->second;
3809      ValuesAtScopes.erase(Old);
3810      UnsignedRanges.erase(Old);
3811      SignedRanges.erase(Old);
3812      ValueExprMap.erase(It);
3813      if (PHINode *PN = dyn_cast<PHINode>(I))
3814        ConstantEvolutionLoopExitValue.erase(PN);
3815    }
3816
3817    PushDefUseChildren(I, Worklist);
3818  }
3819
3820  // Forget all contained loops too, to avoid dangling entries in the
3821  // ValuesAtScopes map.
3822  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
3823    forgetLoop(*I);
3824}
3825
3826/// forgetValue - This method should be called by the client when it has
3827/// changed a value in a way that may affect its value, or which may
3828/// disconnect it from a def-use chain linking it to a loop.
3829void ScalarEvolution::forgetValue(Value *V) {
3830  Instruction *I = dyn_cast<Instruction>(V);
3831  if (!I) return;
3832
3833  // Drop information about expressions based on loop-header PHIs.
3834  SmallVector<Instruction *, 16> Worklist;
3835  Worklist.push_back(I);
3836
3837  SmallPtrSet<Instruction *, 8> Visited;
3838  while (!Worklist.empty()) {
3839    I = Worklist.pop_back_val();
3840    if (!Visited.insert(I)) continue;
3841
3842    ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
3843    if (It != ValueExprMap.end()) {
3844      const SCEV *Old = It->second;
3845      ValuesAtScopes.erase(Old);
3846      UnsignedRanges.erase(Old);
3847      SignedRanges.erase(Old);
3848      ValueExprMap.erase(It);
3849      if (PHINode *PN = dyn_cast<PHINode>(I))
3850        ConstantEvolutionLoopExitValue.erase(PN);
3851    }
3852
3853    PushDefUseChildren(I, Worklist);
3854  }
3855}
3856
3857/// ComputeBackedgeTakenCount - Compute the number of times the backedge
3858/// of the specified loop will execute.
3859ScalarEvolution::BackedgeTakenInfo
3860ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3861  SmallVector<BasicBlock *, 8> ExitingBlocks;
3862  L->getExitingBlocks(ExitingBlocks);
3863
3864  // Examine all exits and pick the most conservative values.
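  // For example (an illustration), if one exit is taken after exactly 10
  // backedges and another after n, the backedge-taken count of the whole
  // loop is umin(10, n): the loop leaves via whichever exit comes first.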
3865  const SCEV *BECount = getCouldNotCompute();
3866  const SCEV *MaxBECount = getCouldNotCompute();
3867  bool CouldNotComputeBECount = false;
3868  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3869    BackedgeTakenInfo NewBTI =
3870      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3871
3872    if (NewBTI.Exact == getCouldNotCompute()) {
3873      // We couldn't compute an exact value for this exit, so
3874      // we won't be able to compute an exact value for the loop.
3875      CouldNotComputeBECount = true;
3876      BECount = getCouldNotCompute();
3877    } else if (!CouldNotComputeBECount) {
3878      if (BECount == getCouldNotCompute())
3879        BECount = NewBTI.Exact;
3880      else
3881        BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3882    }
3883    if (MaxBECount == getCouldNotCompute())
3884      MaxBECount = NewBTI.Max;
3885    else if (NewBTI.Max != getCouldNotCompute())
3886      MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3887  }
3888
3889  return BackedgeTakenInfo(BECount, MaxBECount);
3890}
3891
3892/// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3893/// of the specified loop will execute if it exits via the specified block.
3894ScalarEvolution::BackedgeTakenInfo
3895ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3896                                                   BasicBlock *ExitingBlock) {
3897
3898  // Okay, we've chosen an exiting block.  See what condition causes us to
3899  // exit at this block.
3900  //
3901  // FIXME: we should be able to handle switch instructions (with a single exit)
3902  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3903  if (ExitBr == 0) return getCouldNotCompute();
3904  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3905
3906  // At this point, we know we have a conditional branch that determines whether
3907  // the loop is exited.  However, we don't know if the branch is executed each
3908  // time through the loop.  If not, then the execution count of the branch will
3909  // not be equal to the trip count of the loop.
3910  //
3911  // Currently we check for this by checking to see if the Exit branch goes to
3912  // the loop header.  If so, we know it will always execute the same number of
3913  // times as the loop.  We also handle the case where the exit block *is* the
3914  // loop header.  This is common for un-rotated loops.
3915  //
3916  // If both of those tests fail, walk up the unique predecessor chain to the
3917  // header, stopping if there is an edge that doesn't exit the loop. If the
3918  // header is reached, the execution count of the branch will be equal to the
3919  // trip count of the loop.
3920  //
3921  //  More extensive analysis could be done to handle more cases here.
3922  //
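  // For example (an illustration), if the loop body is the straight-line
  // chain  header -> bb1 -> bb2  and the exit branch terminates bb2, the
  // walk below climbs bb2 -> bb1 -> header through unique predecessors,
  // checking that no skipped edge stays inside the loop, and so proves
  // that the exit branch executes exactly once per iteration.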
3923  if (ExitBr->getSuccessor(0) != L->getHeader() &&
3924      ExitBr->getSuccessor(1) != L->getHeader() &&
3925      ExitBr->getParent() != L->getHeader()) {
3926    // The simple checks failed, try climbing the unique predecessor chain
3927    // up to the header.
3928    bool Ok = false;
3929    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3930      BasicBlock *Pred = BB->getUniquePredecessor();
3931      if (!Pred)
3932        return getCouldNotCompute();
3933      TerminatorInst *PredTerm = Pred->getTerminator();
3934      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3935        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3936        if (PredSucc == BB)
3937          continue;
3938        // If the predecessor has a successor that isn't BB and isn't
3939        // outside the loop, assume the worst.
3940        if (L->contains(PredSucc))
3941          return getCouldNotCompute();
3942      }
3943      if (Pred == L->getHeader()) {
3944        Ok = true;
3945        break;
3946      }
3947      BB = Pred;
3948    }
3949    if (!Ok)
3950      return getCouldNotCompute();
3951  }
3952
3953  // Proceed to the next level to examine the exit condition expression.
3954  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3955                                               ExitBr->getSuccessor(0),
3956                                               ExitBr->getSuccessor(1));
3957}
3958
3959/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3960/// backedge of the specified loop will execute if its exit condition
3961/// were a conditional branch of ExitCond, TBB, and FBB.
3962ScalarEvolution::BackedgeTakenInfo
3963ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3964                                                       Value *ExitCond,
3965                                                       BasicBlock *TBB,
3966                                                       BasicBlock *FBB) {
3967  // Check if the controlling expression for this loop is an And or Or.
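  // For example (an illustration), the exit test of
  //   for (i = 0; i < n && p[i]; ++i) ...
  // may reach us as a single branch on "and (i < n), p[i]"; each operand is
  // analyzed separately below and the resulting counts are combined by umin.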
3968  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3969    if (BO->getOpcode() == Instruction::And) {
3970      // Recurse on the operands of the and.
3971      BackedgeTakenInfo BTI0 =
3972        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3973      BackedgeTakenInfo BTI1 =
3974        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3975      const SCEV *BECount = getCouldNotCompute();
3976      const SCEV *MaxBECount = getCouldNotCompute();
3977      if (L->contains(TBB)) {
3978        // Both conditions must be true for the loop to continue executing.
3979        // Choose the less conservative count.
3980        if (BTI0.Exact == getCouldNotCompute() ||
3981            BTI1.Exact == getCouldNotCompute())
3982          BECount = getCouldNotCompute();
3983        else
3984          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3985        if (BTI0.Max == getCouldNotCompute())
3986          MaxBECount = BTI1.Max;
3987        else if (BTI1.Max == getCouldNotCompute())
3988          MaxBECount = BTI0.Max;
3989        else
3990          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3991      } else {
3992        // Both conditions must be true at the same time for the loop to exit.
3993        // For now, be conservative.
3994        assert(L->contains(FBB) && "Loop block has no successor in loop!");
3995        if (BTI0.Max == BTI1.Max)
3996          MaxBECount = BTI0.Max;
3997        if (BTI0.Exact == BTI1.Exact)
3998          BECount = BTI0.Exact;
3999      }
4000
4001      return BackedgeTakenInfo(BECount, MaxBECount);
4002    }
4003    if (BO->getOpcode() == Instruction::Or) {
4004      // Recurse on the operands of the or.
4005      BackedgeTakenInfo BTI0 =
4006        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
4007      BackedgeTakenInfo BTI1 =
4008        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
4009      const SCEV *BECount = getCouldNotCompute();
4010      const SCEV *MaxBECount = getCouldNotCompute();
4011      if (L->contains(FBB)) {
4012        // Both conditions must be false for the loop to continue executing.
4013        // Choose the less conservative count.
4014        if (BTI0.Exact == getCouldNotCompute() ||
4015            BTI1.Exact == getCouldNotCompute())
4016          BECount = getCouldNotCompute();
4017        else
4018          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
4019        if (BTI0.Max == getCouldNotCompute())
4020          MaxBECount = BTI1.Max;
4021        else if (BTI1.Max == getCouldNotCompute())
4022          MaxBECount = BTI0.Max;
4023        else
4024          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
4025      } else {
4026        // Both conditions must be false at the same time for the loop to exit.
4027        // For now, be conservative.
4028        assert(L->contains(TBB) && "Loop block has no successor in loop!");
4029        if (BTI0.Max == BTI1.Max)
4030          MaxBECount = BTI0.Max;
4031        if (BTI0.Exact == BTI1.Exact)
4032          BECount = BTI0.Exact;
4033      }
4034
4035      return BackedgeTakenInfo(BECount, MaxBECount);
4036    }
4037  }
4038
4039  // With an icmp, it may be feasible to compute an exact backedge-taken count.
4040  // Proceed to the next level to examine the icmp.
4041  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
4042    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
4043
4044  // Check for a constant condition. These are normally stripped out by
4045  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
4046  // preserve the CFG and is temporarily leaving constant conditions
4047  // in place.
4048  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
4049    if (L->contains(FBB) == !CI->getZExtValue())
4050      // The backedge is always taken.
4051      return getCouldNotCompute();
4052    else
4053      // The backedge is never taken.
4054      return getConstant(CI->getType(), 0);
4055  }
4056
4057  // If it's not an integer or pointer comparison then compute it the hard way.
4058  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
4059}
4060
4061/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
4062/// backedge of the specified loop will execute if its exit condition
4063/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
4064ScalarEvolution::BackedgeTakenInfo
4065ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
4066                                                           ICmpInst *ExitCond,
4067                                                           BasicBlock *TBB,
4068                                                           BasicBlock *FBB) {
4069
4070  // If the condition was exit on true, convert the condition to exit on false.
4071  ICmpInst::Predicate Cond;
4072  if (!L->contains(FBB))
4073    Cond = ExitCond->getPredicate();
4074  else
4075    Cond = ExitCond->getInversePredicate();
4076
4077  // Handle common loops like: for (X = "string"; *X; ++X)
4078  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
4079    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
4080      BackedgeTakenInfo ItCnt =
4081        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
4082      if (ItCnt.hasAnyInfo())
4083        return ItCnt;
4084    }
4085
4086  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
4087  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
4088
4089  // Try to evaluate any dependencies out of the loop.
4090  LHS = getSCEVAtScope(LHS, L);
4091  RHS = getSCEVAtScope(RHS, L);
4092
4093  // At this point, we would like to compute for how many iterations of the
4094  // loop the predicate will evaluate to true for these inputs.
4095  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
4096    // If there is a loop-invariant, force it into the RHS.
4097    std::swap(LHS, RHS);
4098    Cond = ICmpInst::getSwappedPredicate(Cond);
4099  }
4100
4101  // Simplify the operands before analyzing them.
4102  (void)SimplifyICmpOperands(Cond, LHS, RHS);
4103
4104  // If we have a comparison of a chrec against a constant, try to use value
4105  // ranges to answer this query.
4106  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
4107    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
4108      if (AddRec->getLoop() == L) {
4109        // Form the constant range.
4110        ConstantRange CompRange(
4111            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
4112
4113        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
4114        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
4115      }
4116
4117  switch (Cond) {
4118  case ICmpInst::ICMP_NE: {                     // while (X != Y)
4119    // Convert to: while (X-Y != 0)
4120    BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
4121    if (BTI.hasAnyInfo()) return BTI;
4122    break;
4123  }
4124  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
4125    // Convert to: while (X-Y == 0)
4126    BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
4127    if (BTI.hasAnyInfo()) return BTI;
4128    break;
4129  }
4130  case ICmpInst::ICMP_SLT: {
4131    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
4132    if (BTI.hasAnyInfo()) return BTI;
4133    break;
4134  }
4135  case ICmpInst::ICMP_SGT: {
4136    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
4137                                             getNotSCEV(RHS), L, true);
4138    if (BTI.hasAnyInfo()) return BTI;
4139    break;
4140  }
4141  case ICmpInst::ICMP_ULT: {
4142    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
4143    if (BTI.hasAnyInfo()) return BTI;
4144    break;
4145  }
4146  case ICmpInst::ICMP_UGT: {
4147    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
4148                                             getNotSCEV(RHS), L, false);
4149    if (BTI.hasAnyInfo()) return BTI;
4150    break;
4151  }
4152  default:
4153#if 0
4154    dbgs() << "ComputeBackedgeTakenCount ";
4155    if (ExitCond->getOperand(0)->getType()->isUnsigned())
4156      dbgs() << "[unsigned] ";
4157    dbgs() << *LHS << "   "
4158         << Instruction::getOpcodeName(Instruction::ICmp)
4159         << "   " << *RHS << "\n";
4160#endif
4161    break;
4162  }
4163  return
4164    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
4165}
4166
4167static ConstantInt *
4168EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
4169                                ScalarEvolution &SE) {
4170  const SCEV *InVal = SE.getConstant(C);
4171  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
4172  assert(isa<SCEVConstant>(Val) &&
4173         "Evaluation of SCEV at constant didn't fold correctly?");
4174  return cast<SCEVConstant>(Val)->getValue();
4175}
4176
4177/// GetAddressedElementFromGlobal - Given a global variable with an initializer
4178/// and a GEP expression (missing the pointer index) indexing into it, return
4179/// the addressed element of the initializer or null if the index expression is
4180/// invalid.
4181static Constant *
4182GetAddressedElementFromGlobal(GlobalVariable *GV,
4183                              const std::vector<ConstantInt*> &Indices) {
4184  Constant *Init = GV->getInitializer();
4185  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
4186    uint64_t Idx = Indices[i]->getZExtValue();
4187    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
4188      assert(Idx < CS->getNumOperands() && "Bad struct index!");
4189      Init = cast<Constant>(CS->getOperand(Idx));
4190    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
4191      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
4192      Init = cast<Constant>(CA->getOperand(Idx));
4193    } else if (isa<ConstantAggregateZero>(Init)) {
4194      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
4195        assert(Idx < STy->getNumElements() && "Bad struct index!");
4196        Init = Constant::getNullValue(STy->getElementType(Idx));
4197      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
4198        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
4199        Init = Constant::getNullValue(ATy->getElementType());
4200      } else {
4201        llvm_unreachable("Unknown constant aggregate type!");
4202      }
4203      // Init is now the zero element itself; continue with any remaining indices.
4204    } else {
4205      return 0; // Unknown initializer type
4206    }
4207  }
4208  return Init;
4209}
4210
4211/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
4212/// 'icmp op load X, cst', try to see if we can compute the backedge
4213/// execution count.
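///
/// For example (an illustration), given
///   @a = constant [5 x i32] [i32 1, i32 2, i32 3, i32 0, i32 9]
/// and a loop exiting when a[i] == 0, brute-force evaluation of the load at
/// i = 0, 1, 2, ... yields a backedge-taken count of 3.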
4214ScalarEvolution::BackedgeTakenInfo
4215ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
4216                                                LoadInst *LI,
4217                                                Constant *RHS,
4218                                                const Loop *L,
4219                                                ICmpInst::Predicate predicate) {
4220  if (LI->isVolatile()) return getCouldNotCompute();
4221
4222  // Check to see if the loaded pointer is a getelementptr of a global.
4223  // TODO: Use SCEV instead of manually grubbing with GEPs.
4224  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
4225  if (!GEP) return getCouldNotCompute();
4226
4227  // Make sure that it is really a constant global we are gepping, with an
4228  // initializer, and make sure the first IDX is really 0.
4229  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
4230  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
4231      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
4232      !cast<Constant>(GEP->getOperand(1))->isNullValue())
4233    return getCouldNotCompute();
4234
4235  // Okay, we allow one non-constant index into the GEP instruction.
4236  Value *VarIdx = 0;
4237  std::vector<ConstantInt*> Indexes;
4238  unsigned VarIdxNum = 0;
4239  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
4240    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
4241      Indexes.push_back(CI);
4242    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
4243      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
4244      VarIdx = GEP->getOperand(i);
4245      VarIdxNum = i-2;
4246      Indexes.push_back(0);
4247    }
4248
4249  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
4250  // Check to see if X is a loop-variant value now.
4251  const SCEV *Idx = getSCEV(VarIdx);
4252  Idx = getSCEVAtScope(Idx, L);
4253
4254  // We can only recognize very limited forms of loop index expressions, in
4255  // particular, only affine AddRec's like {C1,+,C2}.
4256  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
4257  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
4258      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
4259      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
4260    return getCouldNotCompute();
4261
4262  unsigned MaxSteps = MaxBruteForceIterations;
4263  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
4264    ConstantInt *ItCst = ConstantInt::get(
4265                           cast<IntegerType>(IdxExpr->getType()), IterationNum);
4266    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
4267
4268    // Form the GEP offset.
4269    Indexes[VarIdxNum] = Val;
4270
4271    Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
4272    if (Result == 0) break;  // Cannot compute!
4273
4274    // Evaluate the condition for this iteration.
4275    Result = ConstantExpr::getICmp(predicate, Result, RHS);
4276    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
4277    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
4278#if 0
4279      dbgs() << "\n***\n*** Computed loop count " << *ItCst
4280             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
4281             << "***\n";
4282#endif
4283      ++NumArrayLenItCounts;
4284      return getConstant(ItCst);   // Found terminating iteration!
4285    }
4286  }
4287  return getCouldNotCompute();
4288}
4289
4290
4291/// CanConstantFold - Return true if we can constant fold an instruction of the
4292/// specified type, assuming that all operands were constants.
4293static bool CanConstantFold(const Instruction *I) {
4294  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
4295      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
4296    return true;
4297
4298  if (const CallInst *CI = dyn_cast<CallInst>(I))
4299    if (const Function *F = CI->getCalledFunction())
4300      return canConstantFoldCallTo(F);
4301  return false;
4302}
4303
4304/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
4305/// in the loop that V is derived from.  We allow arbitrary operations along the
4306/// way, but the operands of an operation must either be constants or a value
4307/// derived from a constant PHI.  If this expression does not fit with these
4308/// constraints, return null.
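///
/// For example, if the loop header has the PHI "i" and the latch computes
/// "i.next = i * 3 + 1", then getConstantEvolvingPHI(i.next, L) returns i;
/// if the expression also drew on a second header PHI, it would return null.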
4309static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
4310  // If this is not an instruction, or if this is an instruction outside of the
4311  // loop, it can't be derived from a loop PHI.
4312  Instruction *I = dyn_cast<Instruction>(V);
4313  if (I == 0 || !L->contains(I)) return 0;
4314
4315  if (PHINode *PN = dyn_cast<PHINode>(I)) {
4316    if (L->getHeader() == I->getParent())
4317      return PN;
4318    else
4319      // We don't currently keep track of the control flow needed to evaluate
4320      // PHIs, so we cannot handle PHIs inside of loops.
4321      return 0;
4322  }
4323
4324  // If we won't be able to constant fold this expression even if the operands
4325  // are constants, return early.
4326  if (!CanConstantFold(I)) return 0;
4327
4328  // Otherwise, we can evaluate this instruction if all of its operands are
4329  // constant or derived from a PHI node themselves.
4330  PHINode *PHI = 0;
4331  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
4332    if (!isa<Constant>(I->getOperand(Op))) {
4333      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
4334      if (P == 0) return 0;  // Not evolving from PHI
4335      if (PHI == 0)
4336        PHI = P;
4337      else if (PHI != P)
4338        return 0;  // Evolving from multiple different PHIs.
4339    }
4340
4341  // This is an expression evolving from a constant PHI!
4342  return PHI;
4343}
4344
4345/// EvaluateExpression - Given an expression that passes the
4346/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
4347/// in the loop has the value PHIVal.  If we can't fold this expression for some
4348/// reason, return null.
4349static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
4350                                    const TargetData *TD) {
4351  if (isa<PHINode>(V)) return PHIVal;
4352  if (Constant *C = dyn_cast<Constant>(V)) return C;
4353  Instruction *I = cast<Instruction>(V);
4354
4355  std::vector<Constant*> Operands(I->getNumOperands());
4356
4357  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4358    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
4359    if (Operands[i] == 0) return 0;
4360  }
4361
4362  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4363    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
4364                                           Operands[1], TD);
4365  return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4366                                  &Operands[0], Operands.size(), TD);
4367}
4368
4369/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
4370/// in the header of its containing loop, we know the loop executes a
4371/// constant number of times, and the PHI node is just a recurrence
4372/// involving constants, fold it.
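///
/// For example (an illustration), in
///   for (i = 0; i != 3; ++i) x *= 2;   // x enters the loop as 1
/// with BEs = 3, the x PHI folds to the constant 8.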
4373Constant *
4374ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
4375                                                   const APInt &BEs,
4376                                                   const Loop *L) {
4377  std::map<PHINode*, Constant*>::const_iterator I =
4378    ConstantEvolutionLoopExitValue.find(PN);
4379  if (I != ConstantEvolutionLoopExitValue.end())
4380    return I->second;
4381
4382  if (BEs.ugt(MaxBruteForceIterations))
4383    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
4384
4385  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
4386
4387  // Since the loop is canonicalized, the PHI node must have two entries.  One
4388  // entry must be a constant (coming in from outside of the loop), and the
4389  // second must be derived from the same PHI.
4390  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4391  Constant *StartCST =
4392    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4393  if (StartCST == 0)
4394    return RetVal = 0;  // Must be a constant.
4395
4396  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4397  if (getConstantEvolvingPHI(BEValue, L) != PN &&
4398      !isa<Constant>(BEValue))
4399    return RetVal = 0;  // Not derived from same PHI.
4400
4401  // Execute the loop symbolically to determine the exit value.
4402  if (BEs.getActiveBits() >= 32)
4403    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
4404
4405  unsigned NumIterations = BEs.getZExtValue(); // must be in range
4406  unsigned IterationNum = 0;
4407  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
4408    if (IterationNum == NumIterations)
4409      return RetVal = PHIVal;  // Got exit value!
4410
4411    // Compute the value of the PHI node for the next iteration.
4412    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4413    if (NextPHI == PHIVal)
4414      return RetVal = NextPHI;  // Stopped evolving!
4415    if (NextPHI == 0)
4416      return 0;        // Couldn't evaluate!
4417    PHIVal = NextPHI;
4418  }
4419}
4420
4421/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
4422/// constant number of times (the condition evolves only from constants),
4423/// try to evaluate a few iterations of the loop until the exit
4424/// condition gets a value of ExitWhen (true or false).  If we cannot
4425/// evaluate the trip count of the loop, return getCouldNotCompute().
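///
/// For example (an illustration), the non-affine loop
///   for (i = 1; i != 49; i *= 7) ...
/// is evaluated step by step: i takes the values 1, 7, 49, so the exit
/// condition first holds after two backedges and we return 2.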
4426const SCEV *
4427ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
4428                                                       Value *Cond,
4429                                                       bool ExitWhen) {
4430  PHINode *PN = getConstantEvolvingPHI(Cond, L);
4431  if (PN == 0) return getCouldNotCompute();
4432
4433  // If the loop is canonicalized, the PHI will have exactly two entries.
4434  // That's the only form we support here.
4435  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
4436
4437  // One entry must be a constant (coming in from outside of the loop), and the
4438  // second must be derived from the same PHI.
4439  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4440  Constant *StartCST =
4441    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4442  if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
4443
4444  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4445  if (getConstantEvolvingPHI(BEValue, L) != PN &&
4446      !isa<Constant>(BEValue))
4447    return getCouldNotCompute();  // Not derived from same PHI.
4448
4449  // Okay, we found a PHI node that defines the trip count of this loop.  Execute
4450  // the loop symbolically to determine when the condition gets a value of
4451  // "ExitWhen".
4452  unsigned IterationNum = 0;
4453  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
4454  for (Constant *PHIVal = StartCST;
4455       IterationNum != MaxIterations; ++IterationNum) {
4456    ConstantInt *CondVal =
4457      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD));
4458
4459    // Couldn't symbolically evaluate.
4460    if (!CondVal) return getCouldNotCompute();
4461
4462    if (CondVal->getValue() == uint64_t(ExitWhen)) {
4463      ++NumBruteForceTripCountsComputed;
4464      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
4465    }
4466
4467    // Compute the value of the PHI node for the next iteration.
4468    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4469    if (NextPHI == 0 || NextPHI == PHIVal)
4470      return getCouldNotCompute();// Couldn't evaluate or not making progress...
4471    PHIVal = NextPHI;
4472  }
4473
4474  // Too many iterations were needed to evaluate.
4475  return getCouldNotCompute();
4476}
4477
4478/// getSCEVAtScope - Return a SCEV expression for the specified value
4479/// at the specified scope in the program.  The L value specifies the loop
4480/// nest in which to evaluate the expression: null means the top-level
4481/// scope, and a non-null loop means the point immediately inside that loop.
4482///
4483/// This method can be used to compute the exit value for a variable defined
4484/// in a loop by querying what the value will hold in the parent loop.
4485///
4486/// In the case that a relevant loop exit value cannot be computed, the
4487/// original value V is returned.
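///
/// For example, if V is the recurrence {0,+,1}<L> and L's backedge-taken
/// count is 9, evaluating V in L's parent loop yields the constant 9, the
/// value the recurrence holds when L finishes.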
4488const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
4489  // Check to see if we've folded this expression at this loop before.
4490  std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
4491  std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
4492    Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
4493  if (!Pair.second)
4494    return Pair.first->second ? Pair.first->second : V;
4495
4496  // Otherwise compute it.
4497  const SCEV *C = computeSCEVAtScope(V, L);
4498  ValuesAtScopes[V][L] = C;
4499  return C;
4500}
4501
4502const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
4503  if (isa<SCEVConstant>(V)) return V;
4504
4505  // If this instruction is evolved from a constant-evolving PHI, compute the
4506  // exit value from the loop without using SCEVs.
4507  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
4508    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
4509      const Loop *LI = (*this->LI)[I->getParent()];
4510      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
4511        if (PHINode *PN = dyn_cast<PHINode>(I))
4512          if (PN->getParent() == LI->getHeader()) {
4513            // Okay, there is no closed form solution for the PHI node.  Check
4514            // to see if the loop that contains it has a known backedge-taken
4515            // count.  If so, we may be able to force computation of the exit
4516            // value.
4517            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
4518            if (const SCEVConstant *BTCC =
4519                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
4520              // Okay, we know how many times the containing loop executes.  If
4521              // this is a constant evolving PHI node, get the final value at
4522              // the specified iteration number.
4523              Constant *RV = getConstantEvolutionLoopExitValue(PN,
4524                                                   BTCC->getValue()->getValue(),
4525                                                               LI);
4526              if (RV) return getSCEV(RV);
4527            }
4528          }
4529
4530      // Okay, this is an expression that we cannot symbolically evaluate
4531      // into a SCEV.  Check to see if it's possible to symbolically evaluate
4532      // the arguments into constants, and if so, try to constant propagate the
4533      // result.  This is particularly useful for computing loop exit values.
4534      if (CanConstantFold(I)) {
4535        SmallVector<Constant *, 4> Operands;
4536        bool MadeImprovement = false;
4537        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4538          Value *Op = I->getOperand(i);
4539          if (Constant *C = dyn_cast<Constant>(Op)) {
4540            Operands.push_back(C);
4541            continue;
4542          }
4543
4544          // If any operand is non-constant and of a non-integer,
4545          // non-pointer type, don't even try to analyze it with
4546          // SCEV techniques.
4547          if (!isSCEVable(Op->getType()))
4548            return V;
4549
4550          const SCEV *OrigV = getSCEV(Op);
4551          const SCEV *OpV = getSCEVAtScope(OrigV, L);
4552          MadeImprovement |= OrigV != OpV;
4553
4554          Constant *C = 0;
4555          if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
4556            C = SC->getValue();
4557          if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
4558            C = dyn_cast<Constant>(SU->getValue());
4559          if (!C) return V;
4560          if (C->getType() != Op->getType())
4561            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4562                                                              Op->getType(),
4563                                                              false),
4564                                      C, Op->getType());
4565          Operands.push_back(C);
4566        }
4567
4568        // Check to see if getSCEVAtScope actually made an improvement.
4569        if (MadeImprovement) {
4570          Constant *C = 0;
4571          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4572            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
4573                                                Operands[0], Operands[1], TD);
4574          else
4575            C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4576                                         &Operands[0], Operands.size(), TD);
4577          if (!C) return V;
4578          return getSCEV(C);
4579        }
4580      }
4581    }
4582
4583    // This is some other type of SCEVUnknown, just return it.
4584    return V;
4585  }
4586
4587  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
4588    // Avoid performing the look-up in the common case where the specified
4589    // expression has no loop-variant portions.
4590    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
4591      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4592      if (OpAtScope != Comm->getOperand(i)) {
4593        // Okay, at least one of these operands is loop variant but might be
4594        // foldable.  Build a new instance of the folded commutative expression.
4595        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
4596                                            Comm->op_begin()+i);
4597        NewOps.push_back(OpAtScope);
4598
4599        for (++i; i != e; ++i) {
4600          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4601          NewOps.push_back(OpAtScope);
4602        }
4603        if (isa<SCEVAddExpr>(Comm))
4604          return getAddExpr(NewOps);
4605        if (isa<SCEVMulExpr>(Comm))
4606          return getMulExpr(NewOps);
4607        if (isa<SCEVSMaxExpr>(Comm))
4608          return getSMaxExpr(NewOps);
4609        if (isa<SCEVUMaxExpr>(Comm))
4610          return getUMaxExpr(NewOps);
4611        llvm_unreachable("Unknown commutative SCEV type!");
4612      }
4613    }
4614    // If we got here, all operands are loop invariant.
4615    return Comm;
4616  }
4617
4618  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
4619    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
4620    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
4621    if (LHS == Div->getLHS() && RHS == Div->getRHS())
4622      return Div;   // must be loop invariant
4623    return getUDivExpr(LHS, RHS);
4624  }
4625
4626  // If this is a loop recurrence for a loop that does not contain L, then we
4627  // are dealing with the final value computed by the loop.
4628  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4629    // First, attempt to evaluate each operand.
4630    // Avoid performing the look-up in the common case where the specified
4631    // expression has no loop-variant portions.
4632    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
4633      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
4634      if (OpAtScope == AddRec->getOperand(i))
4635        continue;
4636
4637      // Okay, at least one of these operands is loop variant but might be
4638      // foldable.  Build a new instance of the folded recurrence.
4639      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
4640                                          AddRec->op_begin()+i);
4641      NewOps.push_back(OpAtScope);
4642      for (++i; i != e; ++i)
4643        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
4644
4645      AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop()));
4646      break;
4647    }
4648
4649    // If the scope is outside the addrec's loop, evaluate it by using the
4650    // loop exit value of the addrec.
4651    if (!AddRec->getLoop()->contains(L)) {
4652      // To evaluate this recurrence, we need to know how many times the AddRec
4653      // loop iterates.  Compute this now.
4654      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
4655      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
4656
4657      // Then, evaluate the AddRec.
4658      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
4659    }
4660
4661    return AddRec;
4662  }
4663
4664  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
4665    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4666    if (Op == Cast->getOperand())
4667      return Cast;  // must be loop invariant
4668    return getZeroExtendExpr(Op, Cast->getType());
4669  }
4670
4671  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
4672    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4673    if (Op == Cast->getOperand())
4674      return Cast;  // must be loop invariant
4675    return getSignExtendExpr(Op, Cast->getType());
4676  }
4677
4678  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
4679    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4680    if (Op == Cast->getOperand())
4681      return Cast;  // must be loop invariant
4682    return getTruncateExpr(Op, Cast->getType());
4683  }
4684
4685  llvm_unreachable("Unknown SCEV type!");
4686  return 0;
4687}
4688
4689/// getSCEVAtScope - This is a convenience function which does
4690/// getSCEVAtScope(getSCEV(V), L).
4691const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
4692  return getSCEVAtScope(getSCEV(V), L);
4693}
4694
4695/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
4696/// following equation:
4697///
4698///     A * X = B (mod N)
4699///
4700/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
4701/// A and B isn't important.
4702///
4703/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
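///
/// For example, with BW = 3 (N = 8), solving 6 * X = 4 (mod 8):
/// D = gcd(6, 8) = 2 divides 4, A/D = 3, N/D = 4, I = 3^-1 (mod 4) = 3,
/// so X = (I * B/D) mod (N/D) = (3 * 2) mod 4 = 2; indeed
/// 6 * 2 = 12 = 4 (mod 8).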
4704static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
4705                                               ScalarEvolution &SE) {
4706  uint32_t BW = A.getBitWidth();
4707  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
4708  assert(A != 0 && "A must be non-zero.");
4709
4710  // 1. D = gcd(A, N)
4711  //
4712  // The gcd of A and N may have only one prime factor: 2. The number of
4713  // trailing zeros in A is its multiplicity.
4714  uint32_t Mult2 = A.countTrailingZeros();
4715  // D = 2^Mult2
4716
4717  // 2. Check if B is divisible by D.
4718  //
4719  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
4720  // is not less than multiplicity of this prime factor for D.
4721  if (B.countTrailingZeros() < Mult2)
4722    return SE.getCouldNotCompute();
4723
4724  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
4725  // modulo (N / D).
4726  //
4727  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
4728  // bit width during computations.
4729  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
4730  APInt Mod(BW + 1, 0);
4731  Mod.set(BW - Mult2);  // Mod = N / D
4732  APInt I = AD.multiplicativeInverse(Mod);
4733
4734  // 4. Compute the minimum unsigned root of the equation:
4735  // I * (B / D) mod (N / D)
4736  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
4737
4738  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
4739  // bits.
4740  return SE.getConstant(Result.trunc(BW));
4741}
4742
4743/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
4744/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
4745/// might be the same) or two SCEVCouldNotCompute objects.
4746///
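/// The chrec {L,+,M,+,N} evaluates at step x to
///   L + M*x + N*x*(x-1)/2  =  (N/2)*x^2 + (M - N/2)*x + L,
/// which is the quadratic whose roots are computed below.
///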
4747static std::pair<const SCEV *,const SCEV *>
4748SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
4749  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
4750  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
4751  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
4752  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
4753
4754  // We currently can only solve this if the coefficients are constants.
4755  if (!LC || !MC || !NC) {
4756    const SCEV *CNC = SE.getCouldNotCompute();
4757    return std::make_pair(CNC, CNC);
4758  }
4759
4760  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
4761  const APInt &L = LC->getValue()->getValue();
4762  const APInt &M = MC->getValue()->getValue();
4763  const APInt &N = NC->getValue()->getValue();
4764  APInt Two(BitWidth, 2);
4765  APInt Four(BitWidth, 4);
4766
4767  {
4768    using namespace APIntOps;
4769    const APInt& C = L;
4770    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
4771    // The B coefficient is M-N/2
4772    APInt B(M);
4773    B -= sdiv(N,Two);
4774
4775    // The A coefficient is N/2
4776    APInt A(N.sdiv(Two));
4777
4778    // Compute the B^2-4ac term.
4779    APInt SqrtTerm(B);
4780    SqrtTerm *= B;
4781    SqrtTerm -= Four * (A * C);
4782
4783    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4784    // integer value or else APInt::sqrt() will assert.
4785    APInt SqrtVal(SqrtTerm.sqrt());
4786
4787    // Compute the two solutions for the quadratic formula.
4788    // The divisions must be performed as signed divisions.
4789    APInt NegB(-B);
4790    APInt TwoA( A << 1 );
4791    if (TwoA.isMinValue()) {
4792      const SCEV *CNC = SE.getCouldNotCompute();
4793      return std::make_pair(CNC, CNC);
4794    }
4795
4796    LLVMContext &Context = SE.getContext();
4797
4798    ConstantInt *Solution1 =
4799      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
4800    ConstantInt *Solution2 =
4801      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
4802
4803    return std::make_pair(SE.getConstant(Solution1),
4804                          SE.getConstant(Solution2));
4805  } // end APIntOps scope
4806}
4807
4808/// HowFarToZero - Return the number of times a backedge comparing the specified
4809/// value to zero will execute.  If not computable, return CouldNotCompute.
4810ScalarEvolution::BackedgeTakenInfo
4811ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
4812  // If the value is a constant
4813  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4814    // If the value is already zero, the branch will execute zero times.
4815    if (C->getValue()->isZero()) return C;
4816    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4817  }
4818
4819  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4820  if (!AddRec || AddRec->getLoop() != L)
4821    return getCouldNotCompute();
4822
4823  if (AddRec->isAffine()) {
4824    // If this is an affine expression, the execution count of this branch is
4825    // the minimum unsigned root of the following equation:
4826    //
4827    //     Start + Step*N = 0 (mod 2^BW)
4828    //
4829    // equivalent to:
4830    //
4831    //             Step*N = -Start (mod 2^BW)
4832    //
4833    // where BW is the common bit width of Start and Step.
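    //
    // For example, for {10,+,-1} the equation is 10 - 1*N = 0 (mod 2^BW),
    // giving N = 10: the recurrence reaches zero after exactly ten steps.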
4834
4835    // Get the initial value for the loop.
4836    const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4837                                       L->getParentLoop());
4838    const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4839                                      L->getParentLoop());
4840
4841    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4842      // For now we handle only constant steps.
4843
4844      // First, handle unitary steps.
4845      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
4846        return getNegativeSCEV(Start);          //   N = -Start (as unsigned)
4847      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
4848        return Start;                           //    N = Start (as unsigned)
4849
4850      // Then, try to solve the above equation provided that Start is constant.
4851      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4852        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4853                                            -StartC->getValue()->getValue(),
4854                                            *this);
4855    }
4856  } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
4857    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4858    // the quadratic equation to solve it.
4859    std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
4860                                                                    *this);
4861    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4862    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4863    if (R1) {
4864#if 0
4865      dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
4866             << "  sol#2: " << *R2 << "\n";
4867#endif
4868      // Pick the smallest positive root value.
4869      if (ConstantInt *CB =
4870          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
4871                                   R1->getValue(), R2->getValue()))) {
4872        if (!CB->getZExtValue())
4873          std::swap(R1, R2);   // R1 is the minimum root now.
4874
4875        // We can only use this value if the chrec ends up with an exact zero
4876        // value at this index.  When solving for "X*X != 5", for example, we
4877        // should not accept a root of 2.
4878        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4879        if (Val->isZero())
4880          return R1;  // We found a quadratic root!
4881      }
4882    }
4883  }
4884
4885  return getCouldNotCompute();
4886}
4887
4888/// HowFarToNonZero - Return the number of times a backedge checking the
4889/// specified value for nonzero will execute.  If not computable, return
4890/// CouldNotCompute
4891ScalarEvolution::BackedgeTakenInfo
4892ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4893  // Loops that look like: while (X == 0) are very strange indeed.  We don't
4894  // handle them yet except for the trivial case.  This could be expanded in the
4895  // future as needed.
4896
4897  // If the value is a constant, check to see if it is known to be non-zero
4898  // already.  If so, the backedge will execute zero times.
4899  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4900    if (!C->getValue()->isNullValue())
4901      return getConstant(C->getType(), 0);
4902    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4903  }
4904
4905  // We could implement others, but I really doubt anyone writes loops like
4906  // this, and if they did, they would already be constant folded.
4907  return getCouldNotCompute();
4908}
4909
4910/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4911/// (which may not be an immediate predecessor) which has exactly one
4912/// successor from which BB is reachable, paired with that successor
4913/// block, or a pair of null pointers if no such block is found.
4914///
4915std::pair<BasicBlock *, BasicBlock *>
4916ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4917  // If the block has a unique predecessor, then there is no path from the
4918  // predecessor to the block that does not go through the direct edge
4919  // from the predecessor to the block.
4920  if (BasicBlock *Pred = BB->getSinglePredecessor())
4921    return std::make_pair(Pred, BB);
4922
4923  // A loop's header is defined to be a block that dominates the loop.
4924  // If the header has a unique predecessor outside the loop, it must be
4925  // a block that has exactly one successor that can reach the loop.
4926  if (Loop *L = LI->getLoopFor(BB))
4927    return std::make_pair(L->getLoopPredecessor(), L->getHeader());
4928
4929  return std::pair<BasicBlock *, BasicBlock *>();
4930}
4931
4932/// HasSameValue - SCEV structural equivalence is usually sufficient for
4933/// testing whether two expressions are equal, however for the purposes of
4934/// looking for a condition guarding a loop, it can be useful to be a little
4935/// more general, since a front-end may have replicated the controlling
4936/// expression.
4937///
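/// For example (an illustration), two identical "sdiv i32 %n, 4"
/// instructions map to two distinct SCEVUnknowns, but isIdenticalTo lets
/// us recognize that they compute the same value.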
4938static bool HasSameValue(const SCEV *A, const SCEV *B) {
4939  // Quick check to see if they are the same SCEV.
4940  if (A == B) return true;
4941
4942  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4943  // two different instructions with the same value. Check for this case.
4944  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4945    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4946      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4947        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4948          if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
4949            return true;
4950
4951  // Otherwise assume they may have a different value.
4952  return false;
4953}
4954
4955/// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
4956/// predicate Pred. Return true iff any changes were made.
4957///
4958bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
4959                                           const SCEV *&LHS, const SCEV *&RHS) {
4960  bool Changed = false;
4961
4962  // Canonicalize a constant to the right side.
4963  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
4964    // Check for both operands constant.
4965    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
4966      if (ConstantExpr::getICmp(Pred,
4967                                LHSC->getValue(),
4968                                RHSC->getValue())->isNullValue())
4969        goto trivially_false;
4970      else
4971        goto trivially_true;
4972    }
4973    // Otherwise swap the operands to put the constant on the right.
4974    std::swap(LHS, RHS);
4975    Pred = ICmpInst::getSwappedPredicate(Pred);
4976    Changed = true;
4977  }
4978
4979  // If we're comparing an addrec with a value which is loop-invariant in the
4980  // addrec's loop, put the addrec on the left. Also make a dominance check,
4981  // as both operands could be addrecs loop-invariant in each other's loop.
4982  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
4983    const Loop *L = AR->getLoop();
4984    if (LHS->isLoopInvariant(L) && LHS->properlyDominates(L->getHeader(), DT)) {
4985      std::swap(LHS, RHS);
4986      Pred = ICmpInst::getSwappedPredicate(Pred);
4987      Changed = true;
4988    }
4989  }
4990
4991  // If there's a constant operand, canonicalize comparisons with boundary
4992  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
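  // For example, "x uge 5" becomes "x ugt 4", and "x uge 1" becomes
  // "x ne 0".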
4993  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
4994    const APInt &RA = RC->getValue()->getValue();
4995    switch (Pred) {
4996    default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4997    case ICmpInst::ICMP_EQ:
4998    case ICmpInst::ICMP_NE:
4999      break;
5000    case ICmpInst::ICMP_UGE:
5001      if ((RA - 1).isMinValue()) {
5002        Pred = ICmpInst::ICMP_NE;
5003        RHS = getConstant(RA - 1);
5004        Changed = true;
5005        break;
5006      }
5007      if (RA.isMaxValue()) {
5008        Pred = ICmpInst::ICMP_EQ;
5009        Changed = true;
5010        break;
5011      }
5012      if (RA.isMinValue()) goto trivially_true;
5013
5014      Pred = ICmpInst::ICMP_UGT;
5015      RHS = getConstant(RA - 1);
5016      Changed = true;
5017      break;
5018    case ICmpInst::ICMP_ULE:
5019      if ((RA + 1).isMaxValue()) {
5020        Pred = ICmpInst::ICMP_NE;
5021        RHS = getConstant(RA + 1);
5022        Changed = true;
5023        break;
5024      }
5025      if (RA.isMinValue()) {
5026        Pred = ICmpInst::ICMP_EQ;
5027        Changed = true;
5028        break;
5029      }
5030      if (RA.isMaxValue()) goto trivially_true;
5031
5032      Pred = ICmpInst::ICMP_ULT;
5033      RHS = getConstant(RA + 1);
5034      Changed = true;
5035      break;
5036    case ICmpInst::ICMP_SGE:
5037      if ((RA - 1).isMinSignedValue()) {
5038        Pred = ICmpInst::ICMP_NE;
5039        RHS = getConstant(RA - 1);
5040        Changed = true;
5041        break;
5042      }
5043      if (RA.isMaxSignedValue()) {
5044        Pred = ICmpInst::ICMP_EQ;
5045        Changed = true;
5046        break;
5047      }
5048      if (RA.isMinSignedValue()) goto trivially_true;
5049
5050      Pred = ICmpInst::ICMP_SGT;
5051      RHS = getConstant(RA - 1);
5052      Changed = true;
5053      break;
5054    case ICmpInst::ICMP_SLE:
5055      if ((RA + 1).isMaxSignedValue()) {
5056        Pred = ICmpInst::ICMP_NE;
5057        RHS = getConstant(RA + 1);
5058        Changed = true;
5059        break;
5060      }
5061      if (RA.isMinSignedValue()) {
5062        Pred = ICmpInst::ICMP_EQ;
5063        Changed = true;
5064        break;
5065      }
5066      if (RA.isMaxSignedValue()) goto trivially_true;
5067
5068      Pred = ICmpInst::ICMP_SLT;
5069      RHS = getConstant(RA + 1);
5070      Changed = true;
5071      break;
5072    case ICmpInst::ICMP_UGT:
5073      if (RA.isMinValue()) {
5074        Pred = ICmpInst::ICMP_NE;
5075        Changed = true;
5076        break;
5077      }
5078      if ((RA + 1).isMaxValue()) {
5079        Pred = ICmpInst::ICMP_EQ;
5080        RHS = getConstant(RA + 1);
5081        Changed = true;
5082        break;
5083      }
5084      if (RA.isMaxValue()) goto trivially_false;
5085      break;
5086    case ICmpInst::ICMP_ULT:
5087      if (RA.isMaxValue()) {
5088        Pred = ICmpInst::ICMP_NE;
5089        Changed = true;
5090        break;
5091      }
5092      if ((RA - 1).isMinValue()) {
5093        Pred = ICmpInst::ICMP_EQ;
5094        RHS = getConstant(RA - 1);
5095        Changed = true;
5096        break;
5097      }
5098      if (RA.isMinValue()) goto trivially_false;
5099      break;
5100    case ICmpInst::ICMP_SGT:
5101      if (RA.isMinSignedValue()) {
5102        Pred = ICmpInst::ICMP_NE;
5103        Changed = true;
5104        break;
5105      }
5106      if ((RA + 1).isMaxSignedValue()) {
5107        Pred = ICmpInst::ICMP_EQ;
5108        RHS = getConstant(RA + 1);
5109        Changed = true;
5110        break;
5111      }
5112      if (RA.isMaxSignedValue()) goto trivially_false;
5113      break;
5114    case ICmpInst::ICMP_SLT:
5115      if (RA.isMaxSignedValue()) {
5116        Pred = ICmpInst::ICMP_NE;
5117        Changed = true;
5118        break;
5119      }
5120      if ((RA - 1).isMinSignedValue()) {
5121        Pred = ICmpInst::ICMP_EQ;
5122        RHS = getConstant(RA - 1);
5123        Changed = true;
5124        break;
5125      }
5126      if (RA.isMinSignedValue()) goto trivially_false;
5127      break;
5128    }
5129  }
5130
5131  // Check for obvious equality.
5132  if (HasSameValue(LHS, RHS)) {
5133    if (ICmpInst::isTrueWhenEqual(Pred))
5134      goto trivially_true;
5135    if (ICmpInst::isFalseWhenEqual(Pred))
5136      goto trivially_false;
5137  }
5138
5139  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
5140  // adding or subtracting 1 from one of the operands.
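  // For example, when the range checks below prove that no overflow can
  // occur, X s<= Y becomes X s< Y+1 and X u>= Y becomes X u> Y-1.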
5141  switch (Pred) {
5142  case ICmpInst::ICMP_SLE:
5143    if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
5144      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5145                       /*HasNUW=*/false, /*HasNSW=*/true);
5146      Pred = ICmpInst::ICMP_SLT;
5147      Changed = true;
5148    } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
5149      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5150                       /*HasNUW=*/false, /*HasNSW=*/true);
5151      Pred = ICmpInst::ICMP_SLT;
5152      Changed = true;
5153    }
5154    break;
5155  case ICmpInst::ICMP_SGE:
5156    if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
5157      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5158                       /*HasNUW=*/false, /*HasNSW=*/true);
5159      Pred = ICmpInst::ICMP_SGT;
5160      Changed = true;
5161    } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
5162      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5163                       /*HasNUW=*/false, /*HasNSW=*/true);
5164      Pred = ICmpInst::ICMP_SGT;
5165      Changed = true;
5166    }
5167    break;
5168  case ICmpInst::ICMP_ULE:
5169    if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
5170      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5171                       /*HasNUW=*/true, /*HasNSW=*/false);
5172      Pred = ICmpInst::ICMP_ULT;
5173      Changed = true;
5174    } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
5175      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5176                       /*HasNUW=*/true, /*HasNSW=*/false);
5177      Pred = ICmpInst::ICMP_ULT;
5178      Changed = true;
5179    }
5180    break;
5181  case ICmpInst::ICMP_UGE:
5182    if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
5183      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5184                       /*HasNUW=*/true, /*HasNSW=*/false);
5185      Pred = ICmpInst::ICMP_UGT;
5186      Changed = true;
5187    } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
5188      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5189                       /*HasNUW=*/true, /*HasNSW=*/false);
5190      Pred = ICmpInst::ICMP_UGT;
5191      Changed = true;
5192    }
5193    break;
5194  default:
5195    break;
5196  }
5197
5198  // TODO: More simplifications are possible here.
5199
5200  return Changed;
5201
5202trivially_true:
5203  // Return 0 == 0.
5204  LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
5205  Pred = ICmpInst::ICMP_EQ;
5206  return true;
5207
5208trivially_false:
5209  // Return 0 != 0.
5210  LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
5211  Pred = ICmpInst::ICMP_NE;
5212  return true;
5213}
5214
5215bool ScalarEvolution::isKnownNegative(const SCEV *S) {
5216  return getSignedRange(S).getSignedMax().isNegative();
5217}
5218
5219bool ScalarEvolution::isKnownPositive(const SCEV *S) {
5220  return getSignedRange(S).getSignedMin().isStrictlyPositive();
5221}
5222
5223bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
5224  return !getSignedRange(S).getSignedMin().isNegative();
5225}
5226
5227bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
5228  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
5229}
5230
5231bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
5232  return isKnownNegative(S) || isKnownPositive(S);
5233}
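// For example, a SCEV whose signed range is [1, 100) is known positive (and
// therefore also known non-negative and non-zero), while one with signed
// range [-5, 10) satisfies none of the predicates above.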
5234
5235bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
5236                                       const SCEV *LHS, const SCEV *RHS) {
5237  // Canonicalize the inputs first.
5238  (void)SimplifyICmpOperands(Pred, LHS, RHS);
5239
5240  // If LHS or RHS is an addrec, check to see if the condition is true in
5241  // every iteration of the loop.
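  // For example, {0,+,1}<L> s< N holds on every iteration of L if the loop
  // entry is guarded by 0 s< N and the backedge is guarded by the
  // post-increment form {1,+,1}<L> s< N.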
5242  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
5243    if (isLoopEntryGuardedByCond(
5244          AR->getLoop(), Pred, AR->getStart(), RHS) &&
5245        isLoopBackedgeGuardedByCond(
5246          AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
5247      return true;
5248  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
5249    if (isLoopEntryGuardedByCond(
5250          AR->getLoop(), Pred, LHS, AR->getStart()) &&
5251        isLoopBackedgeGuardedByCond(
5252          AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
5253      return true;
5254
5255  // Otherwise see what can be done with known constant ranges.
5256  return isKnownPredicateWithRanges(Pred, LHS, RHS);
5257}
5258
5259bool
5260ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
5261                                            const SCEV *LHS, const SCEV *RHS) {
5262  if (HasSameValue(LHS, RHS))
5263    return ICmpInst::isTrueWhenEqual(Pred);
5264
5265  // This code is split out from isKnownPredicate because it is called from
5266  // within isLoopEntryGuardedByCond.
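  // The range-based proofs below are conservative. For example, if the
  // signed range of LHS is [0, 10) and that of RHS is [10, 20), then
  // LHS s< RHS holds because LHS's maximum (9) is below RHS's minimum (10).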
5267  switch (Pred) {
5268  default:
5269    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5271  case ICmpInst::ICMP_SGT:
5272    Pred = ICmpInst::ICMP_SLT;
5273    std::swap(LHS, RHS);
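    // Fall through to the SLT handling with the operands swapped; the
    // remaining swapped cases below fall through in the same way.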
5274  case ICmpInst::ICMP_SLT: {
5275    ConstantRange LHSRange = getSignedRange(LHS);
5276    ConstantRange RHSRange = getSignedRange(RHS);
5277    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
5278      return true;
5279    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
5280      return false;
5281    break;
5282  }
5283  case ICmpInst::ICMP_SGE:
5284    Pred = ICmpInst::ICMP_SLE;
5285    std::swap(LHS, RHS);
5286  case ICmpInst::ICMP_SLE: {
5287    ConstantRange LHSRange = getSignedRange(LHS);
5288    ConstantRange RHSRange = getSignedRange(RHS);
5289    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
5290      return true;
5291    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
5292      return false;
5293    break;
5294  }
5295  case ICmpInst::ICMP_UGT:
5296    Pred = ICmpInst::ICMP_ULT;
5297    std::swap(LHS, RHS);
5298  case ICmpInst::ICMP_ULT: {
5299    ConstantRange LHSRange = getUnsignedRange(LHS);
5300    ConstantRange RHSRange = getUnsignedRange(RHS);
5301    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
5302      return true;
5303    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
5304      return false;
5305    break;
5306  }
5307  case ICmpInst::ICMP_UGE:
5308    Pred = ICmpInst::ICMP_ULE;
5309    std::swap(LHS, RHS);
5310  case ICmpInst::ICMP_ULE: {
5311    ConstantRange LHSRange = getUnsignedRange(LHS);
5312    ConstantRange RHSRange = getUnsignedRange(RHS);
5313    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
5314      return true;
5315    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
5316      return false;
5317    break;
5318  }
5319  case ICmpInst::ICMP_NE: {
5320    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
5321      return true;
5322    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
5323      return true;
5324
5325    const SCEV *Diff = getMinusSCEV(LHS, RHS);
5326    if (isKnownNonZero(Diff))
5327      return true;
5328    break;
5329  }
5330  case ICmpInst::ICMP_EQ:
5331    // The check at the top of the function catches the case where
5332    // the values are known to be equal.
5333    break;
5334  }
5335  return false;
5336}
5337
5338/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
5339/// protected by a conditional between LHS and RHS.  This is used to
5340/// eliminate casts.
5341bool
5342ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
5343                                             ICmpInst::Predicate Pred,
5344                                             const SCEV *LHS, const SCEV *RHS) {
5345  // Interpret a null as meaning no loop, where there is obviously no guard
5346  // (interprocedural conditions notwithstanding).
5347  if (!L) return true;
5348
5349  BasicBlock *Latch = L->getLoopLatch();
5350  if (!Latch)
5351    return false;
5352
5353  BranchInst *LoopContinuePredicate =
5354    dyn_cast<BranchInst>(Latch->getTerminator());
5355  if (!LoopContinuePredicate ||
5356      LoopContinuePredicate->isUnconditional())
5357    return false;
5358
5359  return isImpliedCond(Pred, LHS, RHS,
5360                       LoopContinuePredicate->getCondition(),
5361                       LoopContinuePredicate->getSuccessor(0) != L->getHeader());
5362}
5363
5364/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
5365/// by a conditional between LHS and RHS.  This is used to help avoid max
5366/// expressions in loop trip counts, and to eliminate casts.
5367bool
5368ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
5369                                          ICmpInst::Predicate Pred,
5370                                          const SCEV *LHS, const SCEV *RHS) {
5371  // Interpret a null as meaning no loop, where there is obviously no guard
5372  // (interprocedural conditions notwithstanding).
5373  if (!L) return false;
5374
5375  // Starting at the loop predecessor, climb up the predecessor chain as long
5376  // as we can find predecessors that have unique successors leading to the
5377  // original header.
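  // For instance, if the header's unique predecessor ends in
  //   %cmp = icmp slt i32 %n, 0
  //   br i1 %cmp, label %exit, label %header
  // then reaching the header implies %n s>= 0, because the header is the
  // false successor.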
5378  for (std::pair<BasicBlock *, BasicBlock *>
5379         Pair(L->getLoopPredecessor(), L->getHeader());
5380       Pair.first;
5381       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
5382
5383    BranchInst *LoopEntryPredicate =
5384      dyn_cast<BranchInst>(Pair.first->getTerminator());
5385    if (!LoopEntryPredicate ||
5386        LoopEntryPredicate->isUnconditional())
5387      continue;
5388
5389    if (isImpliedCond(Pred, LHS, RHS,
5390                      LoopEntryPredicate->getCondition(),
5391                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
5392      return true;
5393  }
5394
5395  return false;
5396}
5397
5398/// isImpliedCond - Test whether the condition described by Pred, LHS,
5399/// and RHS is true whenever the given Cond value evaluates to true.
5400bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
5401                                    const SCEV *LHS, const SCEV *RHS,
5402                                    Value *FoundCondValue,
5403                                    bool Inverse) {
5404  // Recursively handle And and Or conditions.
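  // If the condition is (A && B) and is known to be true, then A and B are
  // each known to be true, so either operand alone may imply the goal.
  // Dually, when Inverse is set, a false (A || B) means both A and B are
  // false.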
5405  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
5406    if (BO->getOpcode() == Instruction::And) {
5407      if (!Inverse)
5408        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
5409               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
5410    } else if (BO->getOpcode() == Instruction::Or) {
5411      if (Inverse)
5412        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
5413               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
5414    }
5415  }
5416
5417  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
5418  if (!ICI) return false;
5419
5420  // Bail if the ICmp's operands' types are wider than the needed type
5421  // before attempting to call getSCEV on them. This avoids infinite
5422  // recursion, since the analysis of widening casts can require loop
5423  // exit condition information for overflow checking, which would
5424  // lead back here.
5425  if (getTypeSizeInBits(LHS->getType()) <
5426      getTypeSizeInBits(ICI->getOperand(0)->getType()))
5427    return false;
5428
5429  // Now that we found a conditional branch that dominates the loop, check to
5430  // see if it is the comparison we are looking for.
5431  ICmpInst::Predicate FoundPred;
5432  if (Inverse)
5433    FoundPred = ICI->getInversePredicate();
5434  else
5435    FoundPred = ICI->getPredicate();
5436
5437  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
5438  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
5439
5440  // Balance the types. The case where FoundLHS' type is wider than
5441  // LHS' type is checked for above.
5442  if (getTypeSizeInBits(LHS->getType()) >
5443      getTypeSizeInBits(FoundLHS->getType())) {
5444    if (CmpInst::isSigned(Pred)) {
5445      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
5446      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
5447    } else {
5448      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
5449      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
5450    }
5451  }
5452
5453  // Canonicalize the query to match the way instcombine will have
5454  // canonicalized the comparison.
5455  if (SimplifyICmpOperands(Pred, LHS, RHS))
5456    if (LHS == RHS)
5457      return CmpInst::isTrueWhenEqual(Pred);
5458  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
5459    if (FoundLHS == FoundRHS)
5460      return CmpInst::isFalseWhenEqual(Pred);
5461
5462  // Check to see if we can make the LHS or RHS match.
5463  if (LHS == FoundRHS || RHS == FoundLHS) {
5464    if (isa<SCEVConstant>(RHS)) {
5465      std::swap(FoundLHS, FoundRHS);
5466      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
5467    } else {
5468      std::swap(LHS, RHS);
5469      Pred = ICmpInst::getSwappedPredicate(Pred);
5470    }
5471  }
5472
5473  // Check whether the found predicate is the same as the desired predicate.
5474  if (FoundPred == Pred)
5475    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
5476
5477  // Check whether swapping the found predicate makes it the same as the
5478  // desired predicate.
5479  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
5480    if (isa<SCEVConstant>(RHS))
5481      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
5482    else
5483      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
5484                                   RHS, LHS, FoundLHS, FoundRHS);
5485  }
5486
5487  // Check whether the found condition is stronger than necessary.
5488  if (FoundPred == ICmpInst::ICMP_EQ)
5489    if (ICmpInst::isTrueWhenEqual(Pred))
5490      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
5491        return true;
5492  if (Pred == ICmpInst::ICMP_NE)
5493    if (!ICmpInst::isTrueWhenEqual(FoundPred))
5494      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
5495        return true;
5496
5497  // Otherwise assume the worst.
5498  return false;
5499}
5500
5501/// isImpliedCondOperands - Test whether the condition described by Pred,
5502/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
5503/// and FoundRHS is true.
5504bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
5505                                            const SCEV *LHS, const SCEV *RHS,
5506                                            const SCEV *FoundLHS,
5507                                            const SCEV *FoundRHS) {
5508  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
5509                                     FoundLHS, FoundRHS) ||
5510         // ~x < ~y --> x > y
5511         isImpliedCondOperandsHelper(Pred, LHS, RHS,
5512                                     getNotSCEV(FoundRHS),
5513                                     getNotSCEV(FoundLHS));
5514}
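// The getNotSCEV trick above works because ~x = -x-1 reverses order in both
// the signed and unsigned senses. For example, 3 s< 5 and ~5 = -6 s< -4 = ~3,
// so FoundLHS pred FoundRHS implies ~FoundRHS pred ~FoundLHS.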
5515
5516/// isImpliedCondOperandsHelper - Test whether the condition described by
5517/// Pred, LHS, and RHS is true whenever the condition described by Pred,
5518/// FoundLHS, and FoundRHS is true.
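/// For example, FoundLHS s< FoundRHS implies LHS s< RHS whenever
/// LHS s<= FoundLHS and RHS s>= FoundRHS, since then
/// LHS <= FoundLHS < FoundRHS <= RHS.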
5519bool
5520ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
5521                                             const SCEV *LHS, const SCEV *RHS,
5522                                             const SCEV *FoundLHS,
5523                                             const SCEV *FoundRHS) {
5524  switch (Pred) {
5525  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5526  case ICmpInst::ICMP_EQ:
5527  case ICmpInst::ICMP_NE:
5528    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
5529      return true;
5530    break;
5531  case ICmpInst::ICMP_SLT:
5532  case ICmpInst::ICMP_SLE:
5533    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
5534        isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
5535      return true;
5536    break;
5537  case ICmpInst::ICMP_SGT:
5538  case ICmpInst::ICMP_SGE:
5539    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
5540        isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
5541      return true;
5542    break;
5543  case ICmpInst::ICMP_ULT:
5544  case ICmpInst::ICMP_ULE:
5545    if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
5546        isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
5547      return true;
5548    break;
5549  case ICmpInst::ICMP_UGT:
5550  case ICmpInst::ICMP_UGE:
5551    if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
5552        isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
5553      return true;
5554    break;
5555  }
5556
5557  return false;
5558}
5559
5560/// getBECount - Subtract the end and start values and divide by the step,
5561/// rounding up, to get the number of times the backedge is executed. Return
5562/// CouldNotCompute if an intermediate computation overflows.
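/// For example, Start=0, End=10, Step=3 yields (10 - 0 + (3-1)) /u 3 = 4,
/// matching the four backedge executions at the values 0, 3, 6, and 9.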
5563const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
5564                                        const SCEV *End,
5565                                        const SCEV *Step,
5566                                        bool NoWrap) {
5567  assert(!isKnownNegative(Step) &&
5568         "This code doesn't handle negative strides yet!");
5569
5570  const Type *Ty = Start->getType();
5571  const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
5572  const SCEV *Diff = getMinusSCEV(End, Start);
5573  const SCEV *RoundUp = getAddExpr(Step, NegOne);
5574
5575  // Add an adjustment to the difference between End and Start so that
5576  // the division will effectively round up.
5577  const SCEV *Add = getAddExpr(Diff, RoundUp);
5578
5579  if (!NoWrap) {
5580    // Check Add for unsigned overflow.
5581    // TODO: More sophisticated things could be done here.
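    // For instance, in i8 a Diff of 200 and a RoundUp of 100 wrap to a sum
    // of 44, while the zero-extended sum computed below is 300, so the
    // mismatch is detected and we conservatively give up.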
5582    const Type *WideTy = IntegerType::get(getContext(),
5583                                          getTypeSizeInBits(Ty) + 1);
5584    const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
5585    const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
5586    const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
5587    if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
5588      return getCouldNotCompute();
5589  }
5590
5591  return getUDivExpr(Add, Step);
5592}
5593
5594/// HowManyLessThans - Return the number of times a backedge containing the
5595/// specified less-than comparison will execute.  If not computable, return
5596/// CouldNotCompute.
5597ScalarEvolution::BackedgeTakenInfo
5598ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
5599                                  const Loop *L, bool isSigned) {
5600  // Only handle:  "ADDREC < LoopInvariant".
5601  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
5602
5603  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
5604  if (!AddRec || AddRec->getLoop() != L)
5605    return getCouldNotCompute();
5606
5607  // Check to see if we have a flag which makes analysis easy.
5608  bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() :
5609                           AddRec->hasNoUnsignedWrap();
5610
5611  if (AddRec->isAffine()) {
5612    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
5613    const SCEV *Step = AddRec->getStepRecurrence(*this);
5614
5615    if (Step->isZero())
5616      return getCouldNotCompute();
5617    if (Step->isOne()) {
5618      // With unit stride, the iteration never steps past the limit value.
5619    } else if (isKnownPositive(Step)) {
5620      // Test whether a positive iteration can step past the limit
5621      // value and past the maximum value for its type in a single step.
5622      // Note that it's not sufficient to check NoWrap here, because even
5623      // though the value after a wrap is undefined, it's not undefined
5624      // behavior, so if wrap does occur, the loop could either terminate or
5625      // loop infinitely, but in either case, the loop is guaranteed to
5626      // iterate at least until the iteration where the wrapping occurs.
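      // For example, in i8 unsigned arithmetic with Step == 100 and RHS as
      // large as 250: 255 - 99 = 156 u< 250, so an iteration at 249 could
      // step past 255 and wrap, and we give up rather than reason about it.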
5627      const SCEV *One = getConstant(Step->getType(), 1);
5628      if (isSigned) {
5629        APInt Max = APInt::getSignedMaxValue(BitWidth);
5630        if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
5631              .slt(getSignedRange(RHS).getSignedMax()))
5632          return getCouldNotCompute();
5633      } else {
5634        APInt Max = APInt::getMaxValue(BitWidth);
5635        if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
5636              .ult(getUnsignedRange(RHS).getUnsignedMax()))
5637          return getCouldNotCompute();
5638      }
5639    } else
5640      // TODO: Handle negative strides here and below.
5641      return getCouldNotCompute();
5642
5643    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
5644    // m.  So, we count the number of iterations in which {n,+,s} < m is true.
5645    // Note that we cannot simply return max(m-n,0)/s because it's not safe to
5646    // treat m-n as either signed or unsigned, due to the possibility of overflow.
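    // For example, in i8 with n = -100 and m = 100, the signed difference
    // m-n = 200 overflows, and with n = 200 and m = 100 the unsigned
    // difference wraps to 156 even though the loop never executes.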
5647
5648    // First, we get the value of the LHS in the first iteration: n
5649    const SCEV *Start = AddRec->getOperand(0);
5650
5651    // Determine the minimum constant start value.
5652    const SCEV *MinStart = getConstant(isSigned ?
5653      getSignedRange(Start).getSignedMin() :
5654      getUnsignedRange(Start).getUnsignedMin());
5655
5656    // If we know that the condition is true in order to enter the loop,
5657    // then we know that it will run exactly (m-n)/s times. Otherwise, we
5658    // only know that it will execute (max(m,n)-n)/s times. In both cases,
5659    // the division must round up.
5660    const SCEV *End = RHS;
5661    if (!isLoopEntryGuardedByCond(L,
5662                                  isSigned ? ICmpInst::ICMP_SLT :
5663                                             ICmpInst::ICMP_ULT,
5664                                  getMinusSCEV(Start, Step), RHS))
5665      End = isSigned ? getSMaxExpr(RHS, Start)
5666                     : getUMaxExpr(RHS, Start);
5667
5668    // Determine the maximum constant end value.
5669    const SCEV *MaxEnd = getConstant(isSigned ?
5670      getSignedRange(End).getSignedMax() :
5671      getUnsignedRange(End).getUnsignedMax());
5672
5673    // If MaxEnd is within a step of the maximum integer value in its type,
5674    // adjust it down to the minimum value which would produce the same effect.
5675    // This allows the subsequent ceiling division of (N+(step-1))/step to
5676    // compute the correct value.
5677    const SCEV *StepMinusOne = getMinusSCEV(Step,
5678                                            getConstant(Step->getType(), 1));
5679    MaxEnd = isSigned ?
5680      getSMinExpr(MaxEnd,
5681                  getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
5682                               StepMinusOne)) :
5683      getUMinExpr(MaxEnd,
5684                  getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
5685                               StepMinusOne));
5686
5687    // Finally, we subtract these two values and divide, rounding up, to get
5688    // the number of times the backedge is executed.
5689    const SCEV *BECount = getBECount(Start, End, Step, NoWrap);
5690
5691    // The maximum backedge count is similar, except using the minimum start
5692    // value and the maximum end value.
5693    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap);
5694
5695    return BackedgeTakenInfo(BECount, MaxBECount);
5696  }
5697
5698  return getCouldNotCompute();
5699}
5700
5701/// getNumIterationsInRange - Return the number of iterations of this loop that
5702/// produce values in the specified constant range.  Another way of looking at
5703/// this is that it returns the first iteration number where the value is not in
5704/// the range, thus computing the exit count. If the iteration count can't
5705/// be computed, an instance of SCEVCouldNotCompute is returned.
5706const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
5707                                                    ScalarEvolution &SE) const {
5708  if (Range.isFullSet())  // Infinite loop.
5709    return SE.getCouldNotCompute();
5710
5711  // If the start is a non-zero constant, shift the range to simplify things.
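  // For example, {5,+,2} in the range [5, 15) becomes {0,+,2} in [0, 10).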
5712  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
5713    if (!SC->getValue()->isZero()) {
5714      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
5715      Operands[0] = SE.getConstant(SC->getType(), 0);
5716      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
5717      if (const SCEVAddRecExpr *ShiftedAddRec =
5718            dyn_cast<SCEVAddRecExpr>(Shifted))
5719        return ShiftedAddRec->getNumIterationsInRange(
5720                           Range.subtract(SC->getValue()->getValue()), SE);
5721      // This is strange and shouldn't happen.
5722      return SE.getCouldNotCompute();
5723    }
5724
5725  // The only time we can solve this is when we have all constant indices.
5726  // Otherwise, we cannot determine the overflow conditions.
5727  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
5728    if (!isa<SCEVConstant>(getOperand(i)))
5729      return SE.getCouldNotCompute();
5730
5731
5732  // Okay at this point we know that all elements of the chrec are constants and
5733  // that the start element is zero.
5734
5735  // First check to see if the range contains zero.  If not, the first
5736  // iteration exits.
5737  unsigned BitWidth = SE.getTypeSizeInBits(getType());
5738  if (!Range.contains(APInt(BitWidth, 0)))
5739    return SE.getConstant(getType(), 0);
5740
5741  if (isAffine()) {
5742    // If this is an affine expression then we have this situation:
5743    //   Solve {0,+,A} in Range  ===  Ax in Range
5744
5745    // We know that zero is in the range.  If A is positive then we know that
5746    // the upper value of the range must be the first possible exit value.
5747    // If A is negative then the lower of the range is the last possible loop
5748    // value.  Also note that we already checked for a full range.
5749    APInt One(BitWidth, 1);
5750    APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
5751    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
5752
5753    // The exit value should be (End+A)/A.
5754    APInt ExitVal = (End + A).udiv(A);
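    // For example, solving {0,+,3} in [0, 10): A = 3, End = 10 - 1 = 9, and
    // ExitVal = (9 + 3) /u 3 = 4; iteration 4 produces 12, the first value
    // outside the range, while iteration 3 produces 9, still inside.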
5755    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
5756
5757    // Evaluate at the exit value.  If we really did fall out of the valid
5758    // range, then we computed our trip count, otherwise wrap around or other
5759    // things must have happened.
5760    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
5761    if (Range.contains(Val->getValue()))
5762      return SE.getCouldNotCompute();  // Something strange happened
5763
5764    // Ensure that the previous value is in the range.  This is a sanity check.
5765    assert(Range.contains(
5766           EvaluateConstantChrecAtConstant(this,
5767           ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
5768           "Linear scev computation is off in a bad way!");
5769    return SE.getConstant(ExitValue);
5770  } else if (isQuadratic()) {
5771    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
5772    // quadratic equation to solve it.  To do this, we must frame our problem in
5773    // terms of figuring out when zero is crossed, instead of when
5774    // Range.getUpper() is crossed.
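    // The start is known to be zero at this point, so we solve
    // {-Upper,+,M,+,N} == 0, which finds the iteration at which the
    // original chrec reaches Range.getUpper().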
5775    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
5776    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
5777    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
5778
5779    // Next, solve the constructed addrec
5780    std::pair<const SCEV *,const SCEV *> Roots =
5781      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
5782    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
5783    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
5784    if (R1) {
5785      // Pick the smallest positive root value.
5786      if (ConstantInt *CB =
5787          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
5788                         R1->getValue(), R2->getValue()))) {
5789        if (!CB->getZExtValue())
5790          std::swap(R1, R2);   // R1 is the minimum root now.
5791
5792        // Make sure the root is not off by one.  The returned iteration should
5793        // not be in the range, but the previous one should be.  When solving
5794        // for "X*X < 5", for example, we should not return a root of 2.
5795        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
5796                                                             R1->getValue(),
5797                                                             SE);
5798        if (Range.contains(R1Val->getValue())) {
5799          // The next iteration must be out of the range...
5800          ConstantInt *NextVal =
5801                ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
5802
5803          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
5804          if (!Range.contains(R1Val->getValue()))
5805            return SE.getConstant(NextVal);
5806          return SE.getCouldNotCompute();  // Something strange happened
5807        }
5808
5809        // If R1 was not in the range, then it is a good return value.  Make
5810        // sure that R1-1 WAS in the range though, just in case.
5811        ConstantInt *NextVal =
5812               ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
5813        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
5814        if (Range.contains(R1Val->getValue()))
5815          return R1;
5816        return SE.getCouldNotCompute();  // Something strange happened
5817      }
5818    }
5819  }
5820
5821  return SE.getCouldNotCompute();
5822}
5823
5824
5825
5826//===----------------------------------------------------------------------===//
5827//                   SCEVCallbackVH Class Implementation
5828//===----------------------------------------------------------------------===//
5829
5830void ScalarEvolution::SCEVCallbackVH::deleted() {
5831  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
5832  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
5833    SE->ConstantEvolutionLoopExitValue.erase(PN);
5834  SE->ValueExprMap.erase(getValPtr());
5835  // this now dangles!
5836}
5837
5838void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
5839  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
5840
5841  // Forget all the expressions associated with users of the old value,
5842  // so that future queries will recompute the expressions using the new
5843  // value.
5844  Value *Old = getValPtr();
5845  SmallVector<User *, 16> Worklist;
5846  SmallPtrSet<User *, 8> Visited;
5847  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
5848       UI != UE; ++UI)
5849    Worklist.push_back(*UI);
5850  while (!Worklist.empty()) {
5851    User *U = Worklist.pop_back_val();
5852    // Deleting the Old value will cause this to dangle. Postpone
5853    // that until everything else is done.
5854    if (U == Old)
5855      continue;
5856    if (!Visited.insert(U))
5857      continue;
5858    if (PHINode *PN = dyn_cast<PHINode>(U))
5859      SE->ConstantEvolutionLoopExitValue.erase(PN);
5860    SE->ValueExprMap.erase(U);
5861    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
5862         UI != UE; ++UI)
5863      Worklist.push_back(*UI);
5864  }
5865  // Delete the Old value.
5866  if (PHINode *PN = dyn_cast<PHINode>(Old))
5867    SE->ConstantEvolutionLoopExitValue.erase(PN);
5868  SE->ValueExprMap.erase(Old);
5869  // this now dangles!
5870}
5871
5872ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
5873  : CallbackVH(V), SE(se) {}
5874
5875//===----------------------------------------------------------------------===//
5876//                   ScalarEvolution Class Implementation
5877//===----------------------------------------------------------------------===//
5878
5879ScalarEvolution::ScalarEvolution()
5880  : FunctionPass(ID), FirstUnknown(0) {
5881  initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
5882}
5883
5884bool ScalarEvolution::runOnFunction(Function &F) {
5885  this->F = &F;
5886  LI = &getAnalysis<LoopInfo>();
5887  TD = getAnalysisIfAvailable<TargetData>();
5888  DT = &getAnalysis<DominatorTree>();
5889  return false;
5890}
5891
5892void ScalarEvolution::releaseMemory() {
5893  // Iterate through all the SCEVUnknown instances and call their
5894  // destructors, so that they release their references to their values.
5895  for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
5896    U->~SCEVUnknown();
5897  FirstUnknown = 0;
5898
5899  ValueExprMap.clear();
5900  BackedgeTakenCounts.clear();
5901  ConstantEvolutionLoopExitValue.clear();
5902  ValuesAtScopes.clear();
5903  UnsignedRanges.clear();
5904  SignedRanges.clear();
5905  UniqueSCEVs.clear();
5906  SCEVAllocator.Reset();
5907}
5908
5909void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
5910  AU.setPreservesAll();
5911  AU.addRequiredTransitive<LoopInfo>();
5912  AU.addRequiredTransitive<DominatorTree>();
5913}
5914
5915bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
5916  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
5917}
5918
5919static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
5920                          const Loop *L) {
5921  // Print all inner loops first
5922  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
5923    PrintLoopInfo(OS, SE, *I);
5924
5925  OS << "Loop ";
5926  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
5927  OS << ": ";
5928
5929  SmallVector<BasicBlock *, 8> ExitBlocks;
5930  L->getExitBlocks(ExitBlocks);
5931  if (ExitBlocks.size() != 1)
5932    OS << "<multiple exits> ";
5933
5934  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
5935    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
5936  } else {
5937    OS << "Unpredictable backedge-taken count. ";
5938  }
5939
5940  OS << "\n"
5941        "Loop ";
5942  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
5943  OS << ": ";
5944
5945  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
5946    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
5947  } else {
5948    OS << "Unpredictable max backedge-taken count. ";
5949  }
5950
5951  OS << "\n";
5952}
5953
5954void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
5955  // ScalarEvolution's implementation of the print method is to print
5956  // out SCEV values of all instructions that are interesting. Doing
5957  // this potentially causes it to create new SCEV objects though,
5958  // which technically conflicts with the const qualifier. This isn't
5959  // observable from outside the class though, so casting away the
5960  // const isn't dangerous.
5961  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
5962
5963  OS << "Classifying expressions for: ";
5964  WriteAsOperand(OS, F, /*PrintType=*/false);
5965  OS << "\n";
5966  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
5967    if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
5968      OS << *I << '\n';
5969      OS << "  -->  ";
5970      const SCEV *SV = SE.getSCEV(&*I);
5971      SV->print(OS);
5972
5973      const Loop *L = LI->getLoopFor((*I).getParent());
5974
5975      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
5976      if (AtUse != SV) {
5977        OS << "  -->  ";
5978        AtUse->print(OS);
5979      }
5980
5981      if (L) {
5982        OS << "\t\t" "Exits: ";
5983        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
5984        if (!ExitValue->isLoopInvariant(L)) {
5985          OS << "<<Unknown>>";
5986        } else {
5987          OS << *ExitValue;
5988        }
5989      }
5990
5991      OS << "\n";
5992    }
5993
5994  OS << "Determining loop execution counts for: ";
5995  WriteAsOperand(OS, F, /*PrintType=*/false);
5996  OS << "\n";
5997  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
5998    PrintLoopInfo(OS, &SE, *I);
5999}
6000
6001