ScalarEvolution.cpp revision 2f199f9952b9dd62b5a0d0f4350b8fa780ebb9cc
1//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file contains the implementation of the scalar evolution analysis
11// engine, which is used primarily to analyze expressions involving induction
12// variables in loops.
13//
14// There are several aspects to this library.  First is the representation of
15// scalar expressions, which are represented as subclasses of the SCEV class.
16// These classes are used to represent certain types of subexpressions that we
17// can handle. We only create one SCEV of a particular shape, so
18// pointer-comparisons for equality are legal.
19//
20// One important aspect of the SCEV objects is that they are never cyclic, even
21// if there is a cycle in the dataflow for an expression (ie, a PHI node).  If
22// the PHI node is one of the idioms that we can represent (e.g., a polynomial
23// recurrence) then we represent it directly as a recurrence node, otherwise we
24// represent it as a SCEVUnknown node.
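//
// For example (an illustrative sketch; the value names are hypothetical),
// given a loop whose induction variable is
//
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 4
//
// the PHI is a polynomial recurrence and is represented directly as the add
// recurrence {0,+,4}<%loop>: a value that starts at 0 and advances by 4 on
// every iteration of %loop.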
25//
26// In addition to being able to represent expressions of various types, we also
27// have folders that are used to build the *canonical* representation for a
28// particular expression.  These folders are capable of using a variety of
29// rewrite rules to simplify the expressions.
30//
31// Once the folders are defined, we can implement the more interesting
32// higher-level code, such as the code that recognizes PHI nodes of various
33// types, computes the execution count of a loop, etc.
34//
35// TODO: We should use these routines and value representations to implement
36// dependence analysis!
37//
38//===----------------------------------------------------------------------===//
39//
40// There are several good references for the techniques used in this analysis.
41//
42//  Chains of recurrences -- a method to expedite the evaluation
43//  of closed-form functions
44//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
45//
46//  On computational properties of chains of recurrences
47//  Eugene V. Zima
48//
49//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
50//  Robert A. van Engelen
51//
52//  Efficient Symbolic Analysis for Optimizing Compilers
53//  Robert A. van Engelen
54//
55//  Using the chains of recurrences algebra for data dependence testing and
56//  induction variable substitution
57//  MS Thesis, Johnie Birch
58//
59//===----------------------------------------------------------------------===//
60
61#define DEBUG_TYPE "scalar-evolution"
62#include "llvm/Analysis/ScalarEvolutionExpressions.h"
63#include "llvm/Constants.h"
64#include "llvm/DerivedTypes.h"
65#include "llvm/GlobalVariable.h"
66#include "llvm/GlobalAlias.h"
67#include "llvm/Instructions.h"
68#include "llvm/LLVMContext.h"
69#include "llvm/Operator.h"
70#include "llvm/Analysis/ConstantFolding.h"
71#include "llvm/Analysis/Dominators.h"
72#include "llvm/Analysis/LoopInfo.h"
73#include "llvm/Analysis/ValueTracking.h"
74#include "llvm/Assembly/Writer.h"
75#include "llvm/Target/TargetData.h"
76#include "llvm/Support/CommandLine.h"
77#include "llvm/Support/ConstantRange.h"
78#include "llvm/Support/Debug.h"
79#include "llvm/Support/ErrorHandling.h"
80#include "llvm/Support/GetElementPtrTypeIterator.h"
81#include "llvm/Support/InstIterator.h"
82#include "llvm/Support/MathExtras.h"
83#include "llvm/Support/raw_ostream.h"
84#include "llvm/ADT/Statistic.h"
85#include "llvm/ADT/STLExtras.h"
86#include "llvm/ADT/SmallPtrSet.h"
87#include <algorithm>
88using namespace llvm;
89
90STATISTIC(NumArrayLenItCounts,
91          "Number of trip counts computed with array length");
92STATISTIC(NumTripCountsComputed,
93          "Number of loops with predictable loop counts");
94STATISTIC(NumTripCountsNotComputed,
95          "Number of loops without predictable loop counts");
96STATISTIC(NumBruteForceTripCountsComputed,
97          "Number of loops with trip counts computed by force");
98
99static cl::opt<unsigned>
100MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
101                        cl::desc("Maximum number of iterations SCEV will "
102                                 "symbolically execute a constant "
103                                 "derived loop"),
104                        cl::init(100));
105
106INITIALIZE_PASS(ScalarEvolution, "scalar-evolution",
107                "Scalar Evolution Analysis", false, true);
108char ScalarEvolution::ID = 0;
109
110//===----------------------------------------------------------------------===//
111//                           SCEV class definitions
112//===----------------------------------------------------------------------===//
113
114//===----------------------------------------------------------------------===//
115// Implementation of the SCEV class.
116//
117
118SCEV::~SCEV() {}
119
120void SCEV::dump() const {
121  print(dbgs());
122  dbgs() << '\n';
123}
124
125bool SCEV::isZero() const {
126  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
127    return SC->getValue()->isZero();
128  return false;
129}
130
131bool SCEV::isOne() const {
132  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
133    return SC->getValue()->isOne();
134  return false;
135}
136
137bool SCEV::isAllOnesValue() const {
138  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
139    return SC->getValue()->isAllOnesValue();
140  return false;
141}
142
143SCEVCouldNotCompute::SCEVCouldNotCompute() :
144  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
145
146bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
147  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
148  return false;
149}
150
151const Type *SCEVCouldNotCompute::getType() const {
152  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
153  return 0;
154}
155
156bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
157  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
158  return false;
159}
160
161bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
162  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
163  return false;
164}
165
166void SCEVCouldNotCompute::print(raw_ostream &OS) const {
167  OS << "***COULDNOTCOMPUTE***";
168}
169
170bool SCEVCouldNotCompute::classof(const SCEV *S) {
171  return S->getSCEVType() == scCouldNotCompute;
172}
173
174const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
175  FoldingSetNodeID ID;
176  ID.AddInteger(scConstant);
177  ID.AddPointer(V);
178  void *IP = 0;
179  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
180  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
181  UniqueSCEVs.InsertNode(S, IP);
182  return S;
183}
184
185const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
186  return getConstant(ConstantInt::get(getContext(), Val));
187}
188
189const SCEV *
190ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
191  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
192  return getConstant(ConstantInt::get(ITy, V, isSigned));
193}
194
195const Type *SCEVConstant::getType() const { return V->getType(); }
196
197void SCEVConstant::print(raw_ostream &OS) const {
198  WriteAsOperand(OS, V, false);
199}
200
201SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
202                           unsigned SCEVTy, const SCEV *op, const Type *ty)
203  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
204
205bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
206  return Op->dominates(BB, DT);
207}
208
209bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
210  return Op->properlyDominates(BB, DT);
211}
212
213SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
214                                   const SCEV *op, const Type *ty)
215  : SCEVCastExpr(ID, scTruncate, op, ty) {
216  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
217         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
218         "Cannot truncate non-integer value!");
219}
220
221void SCEVTruncateExpr::print(raw_ostream &OS) const {
222  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
223}
224
225SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
226                                       const SCEV *op, const Type *ty)
227  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
228  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
229         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
230         "Cannot zero extend non-integer value!");
231}
232
233void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
234  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
235}
236
237SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
238                                       const SCEV *op, const Type *ty)
239  : SCEVCastExpr(ID, scSignExtend, op, ty) {
240  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
241         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
242         "Cannot sign extend non-integer value!");
243}
244
245void SCEVSignExtendExpr::print(raw_ostream &OS) const {
246  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
247}
248
249void SCEVCommutativeExpr::print(raw_ostream &OS) const {
250  const char *OpStr = getOperationStr();
251  OS << "(";
252  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
253    OS << **I;
254    if (llvm::next(I) != E)
255      OS << OpStr;
256  }
257  OS << ")";
258}
259
260bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
261  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
262    if (!(*I)->dominates(BB, DT))
263      return false;
264  return true;
265}
266
267bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
268  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
269    if (!(*I)->properlyDominates(BB, DT))
270      return false;
271  return true;
272}
273
274bool SCEVNAryExpr::isLoopInvariant(const Loop *L) const {
275  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
276    if (!(*I)->isLoopInvariant(L))
277      return false;
278  return true;
279}
280
281// hasComputableLoopEvolution - N-ary expressions have computable loop
282// evolutions iff they have at least one operand that varies with the loop,
283// and all varying operands are computable.
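// For example (illustrative), the add expression ({0,+,4}<%L> + %a), where %a
// is invariant in %L, has a computable evolution in %L: its only varying
// operand is the add recurrence, which is itself computable.  If the varying
// operand were instead an arbitrary SCEVUnknown defined inside %L, it would
// not.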
284bool SCEVNAryExpr::hasComputableLoopEvolution(const Loop *L) const {
285  bool HasVarying = false;
286  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
287    const SCEV *S = *I;
288    if (!S->isLoopInvariant(L)) {
289      if (S->hasComputableLoopEvolution(L))
290        HasVarying = true;
291      else
292        return false;
293    }
294  }
295  return HasVarying;
296}
297
298bool SCEVNAryExpr::hasOperand(const SCEV *O) const {
299  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
300    const SCEV *S = *I;
301    if (O == S || S->hasOperand(O))
302      return true;
303  }
304  return false;
305}
306
307bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
308  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
309}
310
311bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
312  return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT);
313}
314
315void SCEVUDivExpr::print(raw_ostream &OS) const {
316  OS << "(" << *LHS << " /u " << *RHS << ")";
317}
318
319const Type *SCEVUDivExpr::getType() const {
320  // In most cases the types of LHS and RHS will be the same, but in some
321  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
322  // depend on the type for correctness, but handling types carefully can
323  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
324  // a pointer type than the RHS, so use the RHS' type here.
325  return RHS->getType();
326}
327
328bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
329  // Add recurrences are never invariant in the function-body (null loop).
330  if (!QueryLoop)
331    return false;
332
333  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
334  if (QueryLoop->contains(L))
335    return false;
336
337  // This recurrence is invariant w.r.t. QueryLoop if L contains QueryLoop.
338  if (L->contains(QueryLoop))
339    return true;
340
341  // This recurrence is variant w.r.t. QueryLoop if any of its operands
342  // are variant.
343  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
344    if (!getOperand(i)->isLoopInvariant(QueryLoop))
345      return false;
346
347  // Otherwise it's loop-invariant.
348  return true;
349}
350
351bool
352SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
353  return DT->dominates(L->getHeader(), BB) &&
354         SCEVNAryExpr::dominates(BB, DT);
355}
356
357bool
358SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
359  // This uses a "dominates" query instead of a "properly dominates" query because
360  // the instruction which produces the addrec's value is a PHI, and a PHI
361  // effectively properly dominates its entire containing block.
362  return DT->dominates(L->getHeader(), BB) &&
363         SCEVNAryExpr::properlyDominates(BB, DT);
364}
365
366void SCEVAddRecExpr::print(raw_ostream &OS) const {
367  OS << "{" << *Operands[0];
368  for (unsigned i = 1, e = NumOperands; i != e; ++i)
369    OS << ",+," << *Operands[i];
370  OS << "}<";
371  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
372  OS << ">";
373}
374
375void SCEVUnknown::deleted() {
376  // Clear this SCEVUnknown from ValuesAtScopes.
377  SE->ValuesAtScopes.erase(this);
378
379  // Remove this SCEVUnknown from the uniquing map.
380  SE->UniqueSCEVs.RemoveNode(this);
381
382  // Release the value.
383  setValPtr(0);
384}
385
386void SCEVUnknown::allUsesReplacedWith(Value *New) {
387  // Clear this SCEVUnknown from ValuesAtScopes.
388  SE->ValuesAtScopes.erase(this);
389
390  // Remove this SCEVUnknown from the uniquing map.
391  SE->UniqueSCEVs.RemoveNode(this);
392
393  // Update this SCEVUnknown to point to the new value. This is needed
394  // because there may still be outstanding SCEVs which still point to
395  // this SCEVUnknown.
396  setValPtr(New);
397}
398
399bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
400  // All non-instruction values are loop invariant.  All instructions are loop
401  // invariant if they are not contained in the specified loop.
402  // Instructions are never considered invariant in the function body
403  // (null loop) because they are defined within the "loop".
404  if (Instruction *I = dyn_cast<Instruction>(getValue()))
405    return L && !L->contains(I);
406  return true;
407}
408
409bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
410  if (Instruction *I = dyn_cast<Instruction>(getValue()))
411    return DT->dominates(I->getParent(), BB);
412  return true;
413}
414
415bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
416  if (Instruction *I = dyn_cast<Instruction>(getValue()))
417    return DT->properlyDominates(I->getParent(), BB);
418  return true;
419}
420
421const Type *SCEVUnknown::getType() const {
422  return getValue()->getType();
423}
424
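// isSizeOf - Test whether this SCEVUnknown is the canonical sizeof-style
// expression: a ptrtoint of a getelementptr that indexes one element past a
// null pointer of the queried type.  An illustrative form (the exact spelling
// of the constant expression may vary):
//
//   ptrtoint (%T* getelementptr (%T* null, i32 1) to i64)   ; == sizeof(%T)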
425bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
426  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
427    if (VCE->getOpcode() == Instruction::PtrToInt)
428      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
429        if (CE->getOpcode() == Instruction::GetElementPtr &&
430            CE->getOperand(0)->isNullValue() &&
431            CE->getNumOperands() == 2)
432          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
433            if (CI->isOne()) {
434              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
435                                 ->getElementType();
436              return true;
437            }
438
439  return false;
440}
441
442bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
443  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
444    if (VCE->getOpcode() == Instruction::PtrToInt)
445      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
446        if (CE->getOpcode() == Instruction::GetElementPtr &&
447            CE->getOperand(0)->isNullValue()) {
448          const Type *Ty =
449            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
450          if (const StructType *STy = dyn_cast<StructType>(Ty))
451            if (!STy->isPacked() &&
452                CE->getNumOperands() == 3 &&
453                CE->getOperand(1)->isNullValue()) {
454              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
455                if (CI->isOne() &&
456                    STy->getNumElements() == 2 &&
457                    STy->getElementType(0)->isIntegerTy(1)) {
458                  AllocTy = STy->getElementType(1);
459                  return true;
460                }
461            }
462        }
463
464  return false;
465}
466
467bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
468  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
469    if (VCE->getOpcode() == Instruction::PtrToInt)
470      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
471        if (CE->getOpcode() == Instruction::GetElementPtr &&
472            CE->getNumOperands() == 3 &&
473            CE->getOperand(0)->isNullValue() &&
474            CE->getOperand(1)->isNullValue()) {
475          const Type *Ty =
476            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
477          // Ignore vector types here so that ScalarEvolutionExpander doesn't
478          // emit getelementptrs that index into vectors.
479          if (Ty->isStructTy() || Ty->isArrayTy()) {
480            CTy = Ty;
481            FieldNo = CE->getOperand(2);
482            return true;
483          }
484        }
485
486  return false;
487}
488
489void SCEVUnknown::print(raw_ostream &OS) const {
490  const Type *AllocTy;
491  if (isSizeOf(AllocTy)) {
492    OS << "sizeof(" << *AllocTy << ")";
493    return;
494  }
495  if (isAlignOf(AllocTy)) {
496    OS << "alignof(" << *AllocTy << ")";
497    return;
498  }
499
500  const Type *CTy;
501  Constant *FieldNo;
502  if (isOffsetOf(CTy, FieldNo)) {
503    OS << "offsetof(" << *CTy << ", ";
504    WriteAsOperand(OS, FieldNo, false);
505    OS << ")";
506    return;
507  }
508
509  // Otherwise just print it normally.
510  WriteAsOperand(OS, getValue(), false);
511}
512
513//===----------------------------------------------------------------------===//
514//                               SCEV Utilities
515//===----------------------------------------------------------------------===//
516
517namespace {
518  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
519  /// than the complexity of the RHS.  This comparator is used to canonicalize
520  /// expressions.
521  class SCEVComplexityCompare {
522    const LoopInfo *const LI;
523  public:
524    explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
525
526    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
527      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
528      if (LHS == RHS)
529        return false;
530
531      // Primarily, sort the SCEVs by their getSCEVType().
532      unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
533      if (LType != RType)
534        return LType < RType;
535
536      // Aside from the getSCEVType() ordering, the particular ordering
537      // isn't very important except that it's beneficial to be consistent,
538      // so that (a + b) and (b + a) don't end up as different expressions.
539
540      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
541      // not as complete as it could be.
542      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
543        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
544        const Value *LV = LU->getValue(), *RV = RU->getValue();
545
546        // Order pointer values after integer values. This helps SCEVExpander
547        // form GEPs.
548        bool LIsPointer = LV->getType()->isPointerTy(),
549             RIsPointer = RV->getType()->isPointerTy();
550        if (LIsPointer != RIsPointer)
551          return RIsPointer;
552
553        // Compare getValueID values.
554        unsigned LID = LV->getValueID(),
555                 RID = RV->getValueID();
556        if (LID != RID)
557          return LID < RID;
558
559        // Sort arguments by their position.
560        if (const Argument *LA = dyn_cast<Argument>(LV)) {
561          const Argument *RA = cast<Argument>(RV);
562          return LA->getArgNo() < RA->getArgNo();
563        }
564
565        // For instructions, compare their loop depth, and their opcode.
566        // This is pretty loose.
567        if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
568          const Instruction *RInst = cast<Instruction>(RV);
569
570          // Compare loop depths.
571          const BasicBlock *LParent = LInst->getParent(),
572                           *RParent = RInst->getParent();
573          if (LParent != RParent) {
574            unsigned LDepth = LI->getLoopDepth(LParent),
575                     RDepth = LI->getLoopDepth(RParent);
576            if (LDepth != RDepth)
577              return LDepth < RDepth;
578          }
579
580          // Compare the number of operands.
581          unsigned LNumOps = LInst->getNumOperands(),
582                   RNumOps = RInst->getNumOperands();
583          if (LNumOps != RNumOps)
584            return LNumOps < RNumOps;
585        }
586
587        return false;
588      }
589
590      // Compare constant values.
591      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
592        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
593        const ConstantInt *LCC = LC->getValue();
594        const ConstantInt *RCC = RC->getValue();
595        unsigned LBitWidth = LCC->getBitWidth(), RBitWidth = RCC->getBitWidth();
596        if (LBitWidth != RBitWidth)
597          return LBitWidth < RBitWidth;
598        return LCC->getValue().ult(RCC->getValue());
599      }
600
601      // Compare addrec loop depths.
602      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
603        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
604        const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
605        if (LLoop != RLoop) {
606          unsigned LDepth = LLoop->getLoopDepth(),
607                   RDepth = RLoop->getLoopDepth();
608          if (LDepth != RDepth)
609            return LDepth < RDepth;
610        }
611      }
612
613      // Lexicographically compare n-ary expressions.
614      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
615        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
616        unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
617        for (unsigned i = 0; i != LNumOps; ++i) {
618          if (i >= RNumOps)
619            return false;
620          const SCEV *LOp = LC->getOperand(i), *ROp = RC->getOperand(i);
621          if (operator()(LOp, ROp))
622            return true;
623          if (operator()(ROp, LOp))
624            return false;
625        }
626        return LNumOps < RNumOps;
627      }
628
629      // Lexicographically compare udiv expressions.
630      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
631        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
632        const SCEV *LL = LC->getLHS(), *LR = LC->getRHS(),
633                   *RL = RC->getLHS(), *RR = RC->getRHS();
634        if (operator()(LL, RL))
635          return true;
636        if (operator()(RL, LL))
637          return false;
638        if (operator()(LR, RR))
639          return true;
640        if (operator()(RR, LR))
641          return false;
642        return false;
643      }
644
645      // Compare cast expressions by operand.
646      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
647        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
648        return operator()(LC->getOperand(), RC->getOperand());
649      }
650
651      llvm_unreachable("Unknown SCEV kind!");
652      return false;
653    }
654  };
655}
656
657/// GroupByComplexity - Given a list of SCEV objects, order them by their
658/// complexity, and group objects of the same complexity together by value.
659/// When this routine is finished, we know that any duplicates in the vector are
660/// consecutive and that complexity is monotonically increasing.
661///
662/// Note that we take special precautions to ensure that we get deterministic
663/// results from this routine.  In other words, we don't want the results of
664/// this to depend on where the addresses of various SCEV objects happened to
665/// land in memory.
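///
/// As an illustrative example, a hypothetical operand list {%x, 5, %x} is
/// reordered to {5, %x, %x}: constants sort to the front, and the duplicate
/// %x operands become adjacent, which lets getAddExpr fold them into a
/// single (%x * 2) term.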
666///
667static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
668                              LoopInfo *LI) {
669  if (Ops.size() < 2) return;  // Noop
670  if (Ops.size() == 2) {
671    // This is the common case, which also happens to be trivially simple.
672    // Special case it.
673    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
674      std::swap(Ops[0], Ops[1]);
675    return;
676  }
677
678  // Do the rough sort by complexity.
679  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));
680
681  // Now that we are sorted by complexity, group elements of the same
682  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
683  // be extremely short in practice.  Note that we take this approach because we
684  // do not want to depend on the addresses of the objects we are grouping.
685  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
686    const SCEV *S = Ops[i];
687    unsigned Complexity = S->getSCEVType();
688
689    // If there are any objects of the same complexity and same value as this
690    // one, group them.
691    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
692      if (Ops[j] == S) { // Found a duplicate.
693        // Move it to immediately after i'th element.
694        std::swap(Ops[i+1], Ops[j]);
695        ++i;   // no need to rescan it.
696        if (i == e-2) return;  // Done!
697      }
698    }
699  }
700}
701
702
703
704//===----------------------------------------------------------------------===//
705//                      Simple SCEV method implementations
706//===----------------------------------------------------------------------===//
707
708/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
709/// Assumes K > 0.
710static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
711                                       ScalarEvolution &SE,
712                                       const Type* ResultTy) {
713  // Handle the simplest case efficiently.
714  if (K == 1)
715    return SE.getTruncateOrZeroExtend(It, ResultTy);
716
717  // We are using the following formula for BC(It, K):
718  //
719  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
720  //
721  // Suppose W is the bitwidth of the return value.  We must be prepared for
722  // overflow.  Hence, we must ensure that the result of our computation is
723  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
724  // safe in modular arithmetic.
725  //
726  // However, this code doesn't use exactly that formula; the formula it uses
727  // is something like the following, where T is the number of factors of 2 in
728  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
729  // exponentiation:
730  //
731  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
732  //
733  // This formula is trivially equivalent to the previous formula.  However,
734  // this formula can be implemented much more efficiently.  The trick is that
735  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
736  // arithmetic.  To do exact division in modular arithmetic, all we have
737  // to do is multiply by the inverse.  Therefore, this step can be done at
738  // width W.
739  //
740  // The next issue is how to safely do the division by 2^T.  The way this
741  // is done is by doing the multiplication step at a width of at least W + T
742  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
743  // when we perform the division by 2^T (which is equivalent to a right shift
744  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
745  // truncated out after the division by 2^T.
746  //
747  // In comparison to just directly using the first formula, this technique
748  // is much more efficient; using the first formula requires W * K bits,
749  // but this formula uses less than W + K bits. Also, the first formula requires
750  // a division step, whereas this formula only requires multiplies and shifts.
751  //
752  // It doesn't matter whether the subtraction step is done in the calculation
753  // width or the input iteration count's width; if the subtraction overflows,
754  // the result must be zero anyway.  We prefer here to do it in the width of
755  // the induction variable because it helps a lot for certain cases; CodeGen
756  // isn't smart enough to ignore the overflow, which leads to much less
757  // efficient code if the width of the subtraction is wider than the native
758  // register width.
759  //
760  // (It's possible to not widen at all by pulling out factors of 2 before
761  // the multiplication; for example, K=2 can be calculated as
762  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
763  // extra arithmetic, so it's not an obvious win, and it gets
764  // much more complicated for K > 3.)
765
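  // As an illustrative walk-through of the steps below (numbers chosen
  // arbitrarily), take K = 3 and W = 32.  K! = 6, so T = 1 and the odd part
  // K! / 2^T = 3.  OddFactorial becomes 3 and MultiplyFactor becomes the
  // multiplicative inverse of 3 modulo 2^32, which is 0xAAAAAAAB.  The
  // product It*(It-1)*(It-2) is formed at W+T = 33 bits, divided by 2^T = 2,
  // truncated back to 32 bits, and multiplied by 0xAAAAAAAB.  For It = 5 this
  // gives ((5*4*3)/2) * 0xAAAAAAAB = 30 * 0xAAAAAAAB = 10 (mod 2^32), which
  // is BC(5, 3) as expected.
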
766  // Protection from insane SCEVs; this bound is conservative,
767  // but it probably doesn't matter.
768  if (K > 1000)
769    return SE.getCouldNotCompute();
770
771  unsigned W = SE.getTypeSizeInBits(ResultTy);
772
773  // Calculate K! / 2^T and T; we divide out the factors of two before
774  // multiplying for calculating K! / 2^T to avoid overflow.
775  // Other overflow doesn't matter because we only care about the bottom
776  // W bits of the result.
777  APInt OddFactorial(W, 1);
778  unsigned T = 1;
779  for (unsigned i = 3; i <= K; ++i) {
780    APInt Mult(W, i);
781    unsigned TwoFactors = Mult.countTrailingZeros();
782    T += TwoFactors;
783    Mult = Mult.lshr(TwoFactors);
784    OddFactorial *= Mult;
785  }
786
787  // We need at least W + T bits for the multiplication step
788  unsigned CalculationBits = W + T;
789
790  // Calculate 2^T, at width T+W.
791  APInt DivFactor = APInt(CalculationBits, 1).shl(T);
792
793  // Calculate the multiplicative inverse of K! / 2^T;
794  // this multiplication factor will perform the exact division by
795  // K! / 2^T.
796  APInt Mod = APInt::getSignedMinValue(W+1);
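  // Note: getSignedMinValue(W+1) is the (W+1)-bit value 2^W, so the inverse
  // computed below is taken modulo 2^W and then truncated back to width W.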
797  APInt MultiplyFactor = OddFactorial.zext(W+1);
798  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
799  MultiplyFactor = MultiplyFactor.trunc(W);
800
801  // Calculate the product, at width T+W
802  const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
803                                                      CalculationBits);
804  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
805  for (unsigned i = 1; i != K; ++i) {
806    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
807    Dividend = SE.getMulExpr(Dividend,
808                             SE.getTruncateOrZeroExtend(S, CalculationTy));
809  }
810
811  // Divide by 2^T
812  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
813
814  // Truncate the result, and divide by K! / 2^T.
815
816  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
817                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
818}
819
820/// evaluateAtIteration - Return the value of this chain of recurrences at
821/// the specified iteration number.  We can evaluate this recurrence by
822/// multiplying each element in the chain by the binomial coefficient
823/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
824///
825///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
826///
827/// where BC(It, k) stands for binomial coefficient.
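///
/// As an illustrative example, the affine recurrence {A,+,B} evaluates to
/// A + B*It, and the quadratic recurrence {A,+,B,+,C} evaluates to
/// A + B*It + C*(It*(It-1)/2), since BC(It,1) = It and BC(It,2) = It*(It-1)/2.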
828///
829const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
830                                                ScalarEvolution &SE) const {
831  const SCEV *Result = getStart();
832  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
833    // The computation is correct in the face of overflow provided that the
834    // multiplication is performed _after_ the evaluation of the binomial
835    // coefficient.
836    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
837    if (isa<SCEVCouldNotCompute>(Coeff))
838      return Coeff;
839
840    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
841  }
842  return Result;
843}
844
845//===----------------------------------------------------------------------===//
846//                    SCEV Expression folder implementations
847//===----------------------------------------------------------------------===//
848
849const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
850                                             const Type *Ty) {
851  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
852         "This is not a truncating conversion!");
853  assert(isSCEVable(Ty) &&
854         "This is not a conversion to a SCEVable type!");
855  Ty = getEffectiveSCEVType(Ty);
856
857  FoldingSetNodeID ID;
858  ID.AddInteger(scTruncate);
859  ID.AddPointer(Op);
860  ID.AddPointer(Ty);
861  void *IP = 0;
862  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
863
864  // Fold if the operand is constant.
865  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
866    return getConstant(
867      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
868                                               getEffectiveSCEVType(Ty))));
869
870  // trunc(trunc(x)) --> trunc(x)
871  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
872    return getTruncateExpr(ST->getOperand(), Ty);
873
874  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
875  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
876    return getTruncateOrSignExtend(SS->getOperand(), Ty);
877
878  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
879  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
880    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
881
882  // If the input value is a chrec scev, truncate the chrec's operands.
883  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
884    SmallVector<const SCEV *, 4> Operands;
885    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
886      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
887    return getAddRecExpr(Operands, AddRec->getLoop());
888  }
889
890  // As a special case, fold trunc(undef) to undef. We don't want to
891  // know too much about SCEVUnknowns, but this special case is handy
892  // and harmless.
893  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
894    if (isa<UndefValue>(U->getValue()))
895      return getSCEV(UndefValue::get(Ty));
896
897  // The cast wasn't folded; create an explicit cast node. We can reuse
898  // the existing insert position since if we get here, we won't have
899  // made any changes which would invalidate it.
900  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
901                                                 Op, Ty);
902  UniqueSCEVs.InsertNode(S, IP);
903  return S;
904}
905
906const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
907                                               const Type *Ty) {
908  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
909         "This is not an extending conversion!");
910  assert(isSCEVable(Ty) &&
911         "This is not a conversion to a SCEVable type!");
912  Ty = getEffectiveSCEVType(Ty);
913
914  // Fold if the operand is constant.
915  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
916    return getConstant(
917      cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
918                                              getEffectiveSCEVType(Ty))));
919
920  // zext(zext(x)) --> zext(x)
921  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
922    return getZeroExtendExpr(SZ->getOperand(), Ty);
923
924  // Before doing any expensive analysis, check to see if we've already
925  // computed a SCEV for this Op and Ty.
926  FoldingSetNodeID ID;
927  ID.AddInteger(scZeroExtend);
928  ID.AddPointer(Op);
929  ID.AddPointer(Ty);
930  void *IP = 0;
931  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
932
933  // If the input value is a chrec scev, and we can prove that the value
934  // did not overflow in the old, smaller type, we can zero extend all of the
935  // operands (often constants).  This allows analysis of something like
936  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
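  // In that example (illustrative), X is the i8 recurrence {0,+,1}<%loop>;
  // because the loop runs at most 100 times, the recurrence cannot wrap i8,
  // so (zext i8 {0,+,1}<%loop> to i32) folds to the i32 recurrence
  // {0,+,1}<%loop>, keeping the addrec on the outside of the expression.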
937  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
938    if (AR->isAffine()) {
939      const SCEV *Start = AR->getStart();
940      const SCEV *Step = AR->getStepRecurrence(*this);
941      unsigned BitWidth = getTypeSizeInBits(AR->getType());
942      const Loop *L = AR->getLoop();
943
944      // If we have special knowledge that this addrec won't overflow,
945      // we don't need to do any further analysis.
946      if (AR->hasNoUnsignedWrap())
947        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
948                             getZeroExtendExpr(Step, Ty),
949                             L);
950
951      // Check whether the backedge-taken count is SCEVCouldNotCompute.
952      // Note that this serves two purposes: It filters out loops that are
953      // simply not analyzable, and it covers the case where this code is
954      // being called from within backedge-taken count analysis, such that
955      // attempting to ask for the backedge-taken count would likely result
956      // in infinite recursion. In the latter case, the analysis code will
957      // cope with a conservative value, and it will take care to purge
958      // that value once it has finished.
959      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
960      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
961        // Manually compute the final value for AR, checking for
962        // overflow.
963
964        // Check whether the backedge-taken count can be losslessly cast to
965        // the addrec's type. The count is always unsigned.
966        const SCEV *CastedMaxBECount =
967          getTruncateOrZeroExtend(MaxBECount, Start->getType());
968        const SCEV *RecastedMaxBECount =
969          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
970        if (MaxBECount == RecastedMaxBECount) {
971          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
972          // Check whether Start+Step*MaxBECount has no unsigned overflow.
973          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
974          const SCEV *Add = getAddExpr(Start, ZMul);
975          const SCEV *OperandExtendedAdd =
976            getAddExpr(getZeroExtendExpr(Start, WideTy),
977                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
978                                  getZeroExtendExpr(Step, WideTy)));
979          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
980            // Return the expression with the addrec on the outside.
981            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
982                                 getZeroExtendExpr(Step, Ty),
983                                 L);
984
985          // Similar to above, only this time treat the step value as signed.
986          // This covers loops that count down.
987          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
988          Add = getAddExpr(Start, SMul);
989          OperandExtendedAdd =
990            getAddExpr(getZeroExtendExpr(Start, WideTy),
991                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
992                                  getSignExtendExpr(Step, WideTy)));
993          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
994            // Return the expression with the addrec on the outside.
995            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
996                                 getSignExtendExpr(Step, Ty),
997                                 L);
998        }
999
1000        // If the backedge is guarded by a comparison with the pre-inc value
1001        // the addrec is safe. Also, if the entry is guarded by a comparison
1002        // with the start value and the backedge is guarded by a comparison
1003        // with the post-inc value, the addrec is safe.
1004        if (isKnownPositive(Step)) {
1005          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
1006                                      getUnsignedRange(Step).getUnsignedMax());
1007          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
1008              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
1009               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
1010                                           AR->getPostIncExpr(*this), N)))
1011            // Return the expression with the addrec on the outside.
1012            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
1013                                 getZeroExtendExpr(Step, Ty),
1014                                 L);
1015        } else if (isKnownNegative(Step)) {
1016          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1017                                      getSignedRange(Step).getSignedMin());
1018          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1019              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
1020               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
1021                                           AR->getPostIncExpr(*this), N)))
1022            // Return the expression with the addrec on the outside.
1023            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
1024                                 getSignExtendExpr(Step, Ty),
1025                                 L);
1026        }
1027      }
1028    }
1029
1030  // The cast wasn't folded; create an explicit cast node.
1031  // Recompute the insert position, as it may have been invalidated.
1032  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1033  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1034                                                   Op, Ty);
1035  UniqueSCEVs.InsertNode(S, IP);
1036  return S;
1037}
1038
1039const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
1040                                               const Type *Ty) {
1041  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1042         "This is not an extending conversion!");
1043  assert(isSCEVable(Ty) &&
1044         "This is not a conversion to a SCEVable type!");
1045  Ty = getEffectiveSCEVType(Ty);
1046
1047  // Fold if the operand is constant.
1048  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1049    return getConstant(
1050      cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
1051                                              getEffectiveSCEVType(Ty))));
1052
1053  // sext(sext(x)) --> sext(x)
1054  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1055    return getSignExtendExpr(SS->getOperand(), Ty);
1056
1057  // Before doing any expensive analysis, check to see if we've already
1058  // computed a SCEV for this Op and Ty.
1059  FoldingSetNodeID ID;
1060  ID.AddInteger(scSignExtend);
1061  ID.AddPointer(Op);
1062  ID.AddPointer(Ty);
1063  void *IP = 0;
1064  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1065
1066  // If the input value is a chrec scev, and we can prove that the value
1067  // did not overflow in the old, smaller type, we can sign extend all of the
1068  // operands (often constants).  This allows analysis of something like
1069  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
1070  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1071    if (AR->isAffine()) {
1072      const SCEV *Start = AR->getStart();
1073      const SCEV *Step = AR->getStepRecurrence(*this);
1074      unsigned BitWidth = getTypeSizeInBits(AR->getType());
1075      const Loop *L = AR->getLoop();
1076
1077      // If we have special knowledge that this addrec won't overflow,
1078      // we don't need to do any further analysis.
1079      if (AR->hasNoSignedWrap())
1080        return getAddRecExpr(getSignExtendExpr(Start, Ty),
1081                             getSignExtendExpr(Step, Ty),
1082                             L);
1083
1084      // Check whether the backedge-taken count is SCEVCouldNotCompute.
1085      // Note that this serves two purposes: It filters out loops that are
1086      // simply not analyzable, and it covers the case where this code is
1087      // being called from within backedge-taken count analysis, such that
1088      // attempting to ask for the backedge-taken count would likely result
1089      // in infinite recursion. In the latter case, the analysis code will
1090      // cope with a conservative value, and it will take care to purge
1091      // that value once it has finished.
1092      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1093      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1094        // Manually compute the final value for AR, checking for
1095        // overflow.
1096
1097        // Check whether the backedge-taken count can be losslessly cast to
1098        // the addrec's type. The count is always unsigned.
1099        const SCEV *CastedMaxBECount =
1100          getTruncateOrZeroExtend(MaxBECount, Start->getType());
1101        const SCEV *RecastedMaxBECount =
1102          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1103        if (MaxBECount == RecastedMaxBECount) {
1104          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1105          // Check whether Start+Step*MaxBECount has no signed overflow.
1106          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
1107          const SCEV *Add = getAddExpr(Start, SMul);
1108          const SCEV *OperandExtendedAdd =
1109            getAddExpr(getSignExtendExpr(Start, WideTy),
1110                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
1111                                  getSignExtendExpr(Step, WideTy)));
1112          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
1113            // Return the expression with the addrec on the outside.
1114            return getAddRecExpr(getSignExtendExpr(Start, Ty),
1115                                 getSignExtendExpr(Step, Ty),
1116                                 L);
1117
1118          // Similar to above, only this time treat the step value as unsigned.
1119          // This covers loops that count up with an unsigned step.
1120          const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
1121          Add = getAddExpr(Start, UMul);
1122          OperandExtendedAdd =
1123            getAddExpr(getSignExtendExpr(Start, WideTy),
1124                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
1125                                  getZeroExtendExpr(Step, WideTy)));
1126          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
1127            // Return the expression with the addrec on the outside.
1128            return getAddRecExpr(getSignExtendExpr(Start, Ty),
1129                                 getZeroExtendExpr(Step, Ty),
1130                                 L);
1131        }
1132
1133        // If the backedge is guarded by a comparison with the pre-inc value
1134        // the addrec is safe. Also, if the entry is guarded by a comparison
1135        // with the start value and the backedge is guarded by a comparison
1136        // with the post-inc value, the addrec is safe.
1137        if (isKnownPositive(Step)) {
1138          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
1139                                      getSignedRange(Step).getSignedMax());
1140          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
1141              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
1142               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
1143                                           AR->getPostIncExpr(*this), N)))
1144            // Return the expression with the addrec on the outside.
1145            return getAddRecExpr(getSignExtendExpr(Start, Ty),
1146                                 getSignExtendExpr(Step, Ty),
1147                                 L);
1148        } else if (isKnownNegative(Step)) {
1149          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
1150                                      getSignedRange(Step).getSignedMin());
1151          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
1152              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
1153               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
1154                                           AR->getPostIncExpr(*this), N)))
1155            // Return the expression with the addrec on the outside.
1156            return getAddRecExpr(getSignExtendExpr(Start, Ty),
1157                                 getSignExtendExpr(Step, Ty),
1158                                 L);
1159        }
1160      }
1161    }
1162
1163  // The cast wasn't folded; create an explicit cast node.
1164  // Recompute the insert position, as it may have been invalidated.
1165  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1166  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1167                                                   Op, Ty);
1168  UniqueSCEVs.InsertNode(S, IP);
1169  return S;
1170}
1171
1172/// getAnyExtendExpr - Return a SCEV for the given operand extended with
1173/// unspecified bits out to the given type.
1174///
1175const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1176                                              const Type *Ty) {
1177  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1178         "This is not an extending conversion!");
1179  assert(isSCEVable(Ty) &&
1180         "This is not a conversion to a SCEVable type!");
1181  Ty = getEffectiveSCEVType(Ty);
1182
1183  // Sign-extend negative constants.
1184  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1185    if (SC->getValue()->getValue().isNegative())
1186      return getSignExtendExpr(Op, Ty);
1187
1188  // Peel off a truncate cast.
1189  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
1190    const SCEV *NewOp = T->getOperand();
1191    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1192      return getAnyExtendExpr(NewOp, Ty);
1193    return getTruncateOrNoop(NewOp, Ty);
1194  }
1195
1196  // Next try a zext cast. If the cast is folded, use it.
1197  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
1198  if (!isa<SCEVZeroExtendExpr>(ZExt))
1199    return ZExt;
1200
1201  // Next try a sext cast. If the cast is folded, use it.
1202  const SCEV *SExt = getSignExtendExpr(Op, Ty);
1203  if (!isa<SCEVSignExtendExpr>(SExt))
1204    return SExt;
1205
1206  // Force the cast to be folded into the operands of an addrec.
1207  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
1208    SmallVector<const SCEV *, 4> Ops;
1209    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
1210         I != E; ++I)
1211      Ops.push_back(getAnyExtendExpr(*I, Ty));
1212    return getAddRecExpr(Ops, AR->getLoop());
1213  }
1214
1215  // As a special case, fold anyext(undef) to undef. We don't want to
1216  // know too much about SCEVUnknowns, but this special case is handy
1217  // and harmless.
1218  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
1219    if (isa<UndefValue>(U->getValue()))
1220      return getSCEV(UndefValue::get(Ty));
1221
1222  // If the expression is obviously signed, use the sext cast value.
1223  if (isa<SCEVSMaxExpr>(Op))
1224    return SExt;
1225
1226  // Absent any other information, use the zext cast value.
1227  return ZExt;
1228}
1229
1230/// CollectAddOperandsWithScales - Process the given Ops list, which is
1231/// a list of operands to be added under the given scale, update the given
1232/// map. This is a helper function for getAddRecExpr. As an example of
1233/// what it does, given a sequence of operands that would form an add
1234/// expression like this:
1235///
1236///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
1237///
1238/// where A and B are constants, update the map with these values:
1239///
1240///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1241///
1242/// and add 13 + A*B*29 to AccumulatedConstant.
1243/// This will allow getAddExpr to produce this:
1244///
1245///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1246///
1247/// This form often exposes folding opportunities that are hidden in
1248/// the original operand list.
1249///
1250/// Return true iff it appears that any interesting folding opportunities
1251/// may be exposed. This helps getAddExpr short-circuit extra work in
1252/// the common case where no interesting opportunities are present, and
1253/// is also used as a check to avoid infinite recursion.
1254///
1255static bool
1256CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1257                             SmallVector<const SCEV *, 8> &NewOps,
1258                             APInt &AccumulatedConstant,
1259                             const SCEV *const *Ops, size_t NumOperands,
1260                             const APInt &Scale,
1261                             ScalarEvolution &SE) {
1262  bool Interesting = false;
1263
1264  // Iterate over the add operands. They are sorted, with constants first.
1265  unsigned i = 0;
1266  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1267    ++i;
1268    // Pull a buried constant out to the outside.
1269    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
1270      Interesting = true;
1271    AccumulatedConstant += Scale * C->getValue()->getValue();
1272  }
1273
1274  // Next comes everything else. We're especially interested in multiplies
1275  // here, but they're in the middle, so just visit the rest with one loop.
1276  for (; i != NumOperands; ++i) {
1277    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1278    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1279      APInt NewScale =
1280        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
1281      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1282        // A multiplication of a constant with another add; recurse.
1283        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
1284        Interesting |=
1285          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1286                                       Add->op_begin(), Add->getNumOperands(),
1287                                       NewScale, SE);
1288      } else {
1289        // A multiplication of a constant with some other value. Update
1290        // the map.
1291        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1292        const SCEV *Key = SE.getMulExpr(MulOps);
1293        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1294          M.insert(std::make_pair(Key, NewScale));
1295        if (Pair.second) {
1296          NewOps.push_back(Pair.first->first);
1297        } else {
1298          Pair.first->second += NewScale;
1299          // The map already had an entry for this value, which may indicate
1300          // a folding opportunity.
1301          Interesting = true;
1302        }
1303      }
1304    } else {
1305      // An ordinary operand. Update the map.
1306      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1307        M.insert(std::make_pair(Ops[i], Scale));
1308      if (Pair.second) {
1309        NewOps.push_back(Pair.first->first);
1310      } else {
1311        Pair.first->second += Scale;
1312        // The map already had an entry for this value, which may indicate
1313        // a folding opportunity.
1314        Interesting = true;
1315      }
1316    }
1317  }
1318
1319  return Interesting;
1320}
1321
1322namespace {
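  // APIntCompare - Strict weak ordering on APInt values (unsigned compare),
  // so that APInt scales can be used as std::map keys in getAddExpr below.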
1323  struct APIntCompare {
1324    bool operator()(const APInt &LHS, const APInt &RHS) const {
1325      return LHS.ult(RHS);
1326    }
1327  };
1328}
1329
1330/// getAddExpr - Get a canonical add expression, or something simpler if
1331/// possible.
1332const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
1333                                        bool HasNUW, bool HasNSW) {
1334  assert(!Ops.empty() && "Cannot get empty add!");
1335  if (Ops.size() == 1) return Ops[0];
1336#ifndef NDEBUG
1337  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1338  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1339    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1340           "SCEVAddExpr operand types don't match!");
1341#endif
1342
1343  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
1344  if (!HasNUW && HasNSW) {
1345    bool All = true;
1346    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1347      if (!isKnownNonNegative(Ops[i])) {
1348        All = false;
1349        break;
1350      }
1351    if (All) HasNUW = true;
1352  }
1353
1354  // Sort by complexity; this groups all similar expression types together.
1355  GroupByComplexity(Ops, LI);
1356
1357  // If there are any constants, fold them together.
1358  unsigned Idx = 0;
1359  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1360    ++Idx;
1361    assert(Idx < Ops.size());
1362    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1363      // We found two constants, fold them together!
1364      Ops[0] = getConstant(LHSC->getValue()->getValue() +
1365                           RHSC->getValue()->getValue());
1366      if (Ops.size() == 2) return Ops[0];
1367      Ops.erase(Ops.begin()+1);  // Erase the folded element
1368      LHSC = cast<SCEVConstant>(Ops[0]);
1369    }
1370
1371    // If we are left with a constant zero being added, strip it off.
1372    if (LHSC->getValue()->isZero()) {
1373      Ops.erase(Ops.begin());
1374      --Idx;
1375    }
1376
1377    if (Ops.size() == 1) return Ops[0];
1378  }
1379
1380  // Okay, check to see if the same value occurs in the operand list twice.  If
1381  // so, merge them together into a multiply expression.  Since we sorted the
1382  // list, these values are required to be adjacent.
1383  const Type *Ty = Ops[0]->getType();
1384  bool FoundMatch = false;
1385  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1386    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
1387      // Found a match, merge the two values into a multiply, and add any
1388      // remaining values to the result.
1389      const SCEV *Two = getConstant(Ty, 2);
1390      const SCEV *Mul = getMulExpr(Two, Ops[i]);
1391      if (Ops.size() == 2)
1392        return Mul;
1393      Ops[i] = Mul;
1394      Ops.erase(Ops.begin()+i+1);
1395      --i; --e;
1396      FoundMatch = true;
1397    }
1398  if (FoundMatch)
1399    return getAddExpr(Ops, HasNUW, HasNSW);
1400
1401  // Check for truncates. If all the operands are truncated from the same
1402  // type, see if factoring out the truncate would permit the result to be
1403  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1404  // if the contents of the resulting outer trunc fold to something simple.
1405  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
1406    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
1407    const Type *DstType = Trunc->getType();
1408    const Type *SrcType = Trunc->getOperand()->getType();
1409    SmallVector<const SCEV *, 8> LargeOps;
1410    bool Ok = true;
1411    // Check all the operands to see if they can be represented in the
1412    // source type of the truncate.
1413    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1414      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
1415        if (T->getOperand()->getType() != SrcType) {
1416          Ok = false;
1417          break;
1418        }
1419        LargeOps.push_back(T->getOperand());
1420      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1421        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
1422      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
1423        SmallVector<const SCEV *, 8> LargeMulOps;
1424        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
1425          if (const SCEVTruncateExpr *T =
1426                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
1427            if (T->getOperand()->getType() != SrcType) {
1428              Ok = false;
1429              break;
1430            }
1431            LargeMulOps.push_back(T->getOperand());
1432          } else if (const SCEVConstant *C =
1433                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
1434            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
1435          } else {
1436            Ok = false;
1437            break;
1438          }
1439        }
1440        if (Ok)
1441          LargeOps.push_back(getMulExpr(LargeMulOps));
1442      } else {
1443        Ok = false;
1444        break;
1445      }
1446    }
1447    if (Ok) {
1448      // Evaluate the expression in the larger type.
1449      const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW);
1450      // If it folds to something simple, use it. Otherwise, don't.
1451      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
1452        return getTruncateExpr(Fold, DstType);
1453    }
1454  }
1455
1456  // Skip past any other cast SCEVs.
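  // (Ops was sorted by GroupByComplexity, which orders operands by SCEV kind,
  // and all of the cast kinds sort before scAddExpr, so this skips any casts
  // that remain.)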
1457  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
1458    ++Idx;
1459
1460  // If there are add operands, they would be next.
1461  if (Idx < Ops.size()) {
1462    bool DeletedAdd = false;
1463    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
1464      // If we have an add, expand the add operands onto the end of the operands
1465      // list.
1466      Ops.erase(Ops.begin()+Idx);
1467      Ops.append(Add->op_begin(), Add->op_end());
1468      DeletedAdd = true;
1469    }
1470
1471    // If we deleted at least one add, we added operands to the end of the list,
1472    // and they are not necessarily sorted.  Recurse to resort and resimplify
1473    // any operands we just acquired.
1474    if (DeletedAdd)
1475      return getAddExpr(Ops);
1476  }
1477
1478  // Skip over the add expression until we get to a multiply.
1479  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1480    ++Idx;
1481
1482  // Check to see if there are any folding opportunities present with
1483  // operands multiplied by constant values.
1484  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1485    uint64_t BitWidth = getTypeSizeInBits(Ty);
1486    DenseMap<const SCEV *, APInt> M;
1487    SmallVector<const SCEV *, 8> NewOps;
1488    APInt AccumulatedConstant(BitWidth, 0);
1489    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1490                                     Ops.data(), Ops.size(),
1491                                     APInt(BitWidth, 1), *this)) {
1492      // Some interesting folding opportunity is present, so it's worthwhile to
1493      // re-generate the operands list. Group the operands by constant scale,
1494      // to avoid multiplying by the same constant scale multiple times.
1495      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1496      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
1497           E = NewOps.end(); I != E; ++I)
1498        MulOpLists[M.find(*I)->second].push_back(*I);
1499      // Re-generate the operands list.
1500      Ops.clear();
1501      if (AccumulatedConstant != 0)
1502        Ops.push_back(getConstant(AccumulatedConstant));
1503      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1504           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
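        // A scale of zero means the operand canceled out entirely (like the
        // r + (-1 * r) term in the example above), so drop it.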
1505        if (I->first != 0)
1506          Ops.push_back(getMulExpr(getConstant(I->first),
1507                                   getAddExpr(I->second)));
1508      if (Ops.empty())
1509        return getConstant(Ty, 0);
1510      if (Ops.size() == 1)
1511        return Ops[0];
1512      return getAddExpr(Ops);
1513    }
1514  }
1515
1516  // If we are adding something to a multiply expression, make sure the
1517  // something is not already an operand of the multiply.  If so, merge it into
1518  // the multiply.
1519  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1520    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1521    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1522      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1523      if (isa<SCEVConstant>(MulOpSCEV))
1524        continue;
1525      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1526        if (MulOpSCEV == Ops[AddOp]) {
1527          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
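          // When the multiply has exactly two operands, the other operand is
          // the Y*Z term; (MulOp == 0) selects operand 1 when the match was
          // operand 0, and operand 0 otherwise.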
1528          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1529          if (Mul->getNumOperands() != 2) {
1530            // If the multiply has more than two operands, we must get the
1531            // Y*Z term.
1532            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
1533            MulOps.erase(MulOps.begin()+MulOp);
1534            InnerMul = getMulExpr(MulOps);
1535          }
1536          const SCEV *One = getConstant(Ty, 1);
1537          const SCEV *AddOne = getAddExpr(One, InnerMul);
1538          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
1539          if (Ops.size() == 2) return OuterMul;
1540          if (AddOp < Idx) {
1541            Ops.erase(Ops.begin()+AddOp);
1542            Ops.erase(Ops.begin()+Idx-1);
1543          } else {
1544            Ops.erase(Ops.begin()+Idx);
1545            Ops.erase(Ops.begin()+AddOp-1);
1546          }
1547          Ops.push_back(OuterMul);
1548          return getAddExpr(Ops);
1549        }
1550
1551      // Check this multiply against other multiplies being added together.
1552      bool AnyFold = false;
1553      for (unsigned OtherMulIdx = Idx+1;
1554           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1555           ++OtherMulIdx) {
1556        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1557        // If MulOp occurs in OtherMul, we can fold the two multiplies
1558        // together.
1559        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1560             OMulOp != e; ++OMulOp)
1561          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1562            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1563            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1564            if (Mul->getNumOperands() != 2) {
1565              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1566                                                  Mul->op_end());
1567              MulOps.erase(MulOps.begin()+MulOp);
1568              InnerMul1 = getMulExpr(MulOps);
1569            }
1570            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1571            if (OtherMul->getNumOperands() != 2) {
1572              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1573                                                  OtherMul->op_end());
1574              MulOps.erase(MulOps.begin()+OMulOp);
1575              InnerMul2 = getMulExpr(MulOps);
1576            }
1577            const SCEV *InnerMulSum = getAddExpr(InnerMul1, InnerMul2);
1578            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1579            if (Ops.size() == 2) return OuterMul;
1580            Ops[Idx] = OuterMul;
1581            Ops.erase(Ops.begin()+OtherMulIdx);
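            // Restart the scan of the remaining multiplies: the erase above
            // shifted them down, and ++OtherMulIdx resumes at Idx+1.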
1582            OtherMulIdx = Idx;
1583            AnyFold = true;
1584          }
1585      }
1586      if (AnyFold)
1587        return getAddExpr(Ops);
1588    }
1589  }
1590
1591  // If there are any add recurrences in the operands list, see if any other
1592  // added values are loop invariant.  If so, we can fold them into the
1593  // recurrence.
1594  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1595    ++Idx;
1596
1597  // Scan over all recurrences, trying to fold loop invariants into them.
1598  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1599    // Scan all of the other operands to this add and add them to the vector if
1600    // they are loop invariant w.r.t. the recurrence.
1601    SmallVector<const SCEV *, 8> LIOps;
1602    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1603    const Loop *AddRecLoop = AddRec->getLoop();
1604    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1605      if (Ops[i]->isLoopInvariant(AddRecLoop)) {
1606        LIOps.push_back(Ops[i]);
1607        Ops.erase(Ops.begin()+i);
1608        --i; --e;
1609      }
1610
1611    // If we found some loop invariants, fold them into the recurrence.
1612    if (!LIOps.empty()) {
1613      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
1614      LIOps.push_back(AddRec->getStart());
1615
1616      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1617                                             AddRec->op_end());
1618      AddRecOps[0] = getAddExpr(LIOps);
1619
1620      // Build the new addrec. Propagate the NUW and NSW flags if both the
1621      // outer add and the inner addrec are guaranteed to have no overflow.
1622      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop,
1623                                         HasNUW && AddRec->hasNoUnsignedWrap(),
1624                                         HasNSW && AddRec->hasNoSignedWrap());
1625
1626      // If all of the other operands were loop invariant, we are done.
1627      if (Ops.size() == 1) return NewRec;
1628
1629      // Otherwise, add the folded AddRec to the non-loop-invariant parts.
1630      for (unsigned i = 0;; ++i)
1631        if (Ops[i] == AddRec) {
1632          Ops[i] = NewRec;
1633          break;
1634        }
1635      return getAddExpr(Ops);
1636    }
1637
1638    // Okay, if there weren't any loop invariants to be folded, check to see if
1639    // there are multiple AddRec's with the same loop induction variable being
1640    // added together.  If so, we can fold them.
1641    for (unsigned OtherIdx = Idx+1;
1642         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1643      if (OtherIdx != Idx) {
1644        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1645        if (AddRecLoop == OtherAddRec->getLoop()) {
1646          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
1647          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
1648                                              AddRec->op_end());
1649          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
1650            if (i >= NewOps.size()) {
1651              NewOps.append(OtherAddRec->op_begin()+i,
1652                            OtherAddRec->op_end());
1653              break;
1654            }
1655            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
1656          }
1657          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRecLoop);
1658
1659          if (Ops.size() == 2) return NewAddRec;
1660
1661          Ops.erase(Ops.begin()+Idx);
1662          Ops.erase(Ops.begin()+OtherIdx-1);
1663          Ops.push_back(NewAddRec);
1664          return getAddExpr(Ops);
1665        }
1666      }
1667
1668    // Otherwise couldn't fold anything into this recurrence.  Move on to the
1669    // next one.
1670  }
1671
1672  // Okay, it looks like we really DO need an add expr.  Check to see if we
1673  // already have one, otherwise create a new one.
1674  FoldingSetNodeID ID;
1675  ID.AddInteger(scAddExpr);
1676  ID.AddInteger(Ops.size());
1677  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1678    ID.AddPointer(Ops[i]);
1679  void *IP = 0;
1680  SCEVAddExpr *S =
1681    static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1682  if (!S) {
1683    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1684    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1685    S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
1686                                        O, Ops.size());
1687    UniqueSCEVs.InsertNode(S, IP);
1688  }
1689  if (HasNUW) S->setHasNoUnsignedWrap(true);
1690  if (HasNSW) S->setHasNoSignedWrap(true);
1691  return S;
1692}
1693
1694/// getMulExpr - Get a canonical multiply expression, or something simpler if
1695/// possible.
1696const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
1697                                        bool HasNUW, bool HasNSW) {
1698  assert(!Ops.empty() && "Cannot get empty mul!");
1699  if (Ops.size() == 1) return Ops[0];
1700#ifndef NDEBUG
1701  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1702  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1703    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1704           "SCEVMulExpr operand types don't match!");
1705#endif
1706
1707  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
1708  if (!HasNUW && HasNSW) {
1709    bool All = true;
1710    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1711      if (!isKnownNonNegative(Ops[i])) {
1712        All = false;
1713        break;
1714      }
1715    if (All) HasNUW = true;
1716  }
1717
1718  // Sort by complexity; this groups all similar expression types together.
1719  GroupByComplexity(Ops, LI);
1720
1721  // If there are any constants, fold them together.
1722  unsigned Idx = 0;
1723  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1724
1725    // C1*(C2+V) -> C1*C2 + C1*V
1726    if (Ops.size() == 2)
1727      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1728        if (Add->getNumOperands() == 2 &&
1729            isa<SCEVConstant>(Add->getOperand(0)))
1730          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1731                            getMulExpr(LHSC, Add->getOperand(1)));
1732
1733    ++Idx;
1734    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1735      // We found two constants, fold them together!
1736      ConstantInt *Fold = ConstantInt::get(getContext(),
1737                                           LHSC->getValue()->getValue() *
1738                                           RHSC->getValue()->getValue());
1739      Ops[0] = getConstant(Fold);
1740      Ops.erase(Ops.begin()+1);  // Erase the folded element
1741      if (Ops.size() == 1) return Ops[0];
1742      LHSC = cast<SCEVConstant>(Ops[0]);
1743    }
1744
1745    // If we are left with a constant one being multiplied, strip it off.
1746    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1747      Ops.erase(Ops.begin());
1748      --Idx;
1749    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1750      // If we have a multiply of zero, it will always be zero.
1751      return Ops[0];
1752    } else if (Ops[0]->isAllOnesValue()) {
1753      // If we have a mul by -1 of an add, try distributing the -1 among the
1754      // add operands.
1755      if (Ops.size() == 2)
1756        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
1757          SmallVector<const SCEV *, 4> NewOps;
1758          bool AnyFolded = false;
1759          for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
1760               I != E; ++I) {
1761            const SCEV *Mul = getMulExpr(Ops[0], *I);
1762            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
1763            NewOps.push_back(Mul);
1764          }
1765          if (AnyFolded)
1766            return getAddExpr(NewOps);
1767        }
1768    }
1769
1770    if (Ops.size() == 1)
1771      return Ops[0];
1772  }
1773
1774  // Skip over the add expression until we get to a multiply.
1775  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1776    ++Idx;
1777
1778  // If there are mul operands, inline them all into this expression.
1779  if (Idx < Ops.size()) {
1780    bool DeletedMul = false;
1781    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1782      // If we have a mul, expand the mul operands onto the end of the operands
1783      // list.
1784      Ops.erase(Ops.begin()+Idx);
1785      Ops.append(Mul->op_begin(), Mul->op_end());
1786      DeletedMul = true;
1787    }
1788
1789    // If we deleted at least one mul, we added operands to the end of the list,
1790    // and they are not necessarily sorted.  Recurse to resort and resimplify
1791    // any operands we just acquired.
1792    if (DeletedMul)
1793      return getMulExpr(Ops);
1794  }
1795
1796  // If there are any add recurrences in the operands list, see if any other
1797  // multiplied values are loop invariant.  If so, we can fold them into the
1798  // recurrence.
1799  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1800    ++Idx;
1801
1802  // Scan over all recurrences, trying to fold loop invariants into them.
1803  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1804    // Scan all of the other operands to this mul and add them to the vector if
1805    // they are loop invariant w.r.t. the recurrence.
1806    SmallVector<const SCEV *, 8> LIOps;
1807    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1808    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1809      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1810        LIOps.push_back(Ops[i]);
1811        Ops.erase(Ops.begin()+i);
1812        --i; --e;
1813      }
1814
1815    // If we found some loop invariants, fold them into the recurrence.
1816    if (!LIOps.empty()) {
1817      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
1818      SmallVector<const SCEV *, 4> NewOps;
1819      NewOps.reserve(AddRec->getNumOperands());
1820      const SCEV *Scale = getMulExpr(LIOps);
1821      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1822        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1823
1824      // Build the new addrec. Propagate the NUW and NSW flags if both the
1825      // outer mul and the inner addrec are guaranteed to have no overflow.
1826      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
1827                                         HasNUW && AddRec->hasNoUnsignedWrap(),
1828                                         HasNSW && AddRec->hasNoSignedWrap());
1829
1830      // If all of the other operands were loop invariant, we are done.
1831      if (Ops.size() == 1) return NewRec;
1832
1833      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
1834      for (unsigned i = 0;; ++i)
1835        if (Ops[i] == AddRec) {
1836          Ops[i] = NewRec;
1837          break;
1838        }
1839      return getMulExpr(Ops);
1840    }
1841
1842    // Okay, if there weren't any loop invariants to be folded, check to see if
1843    // there are multiple AddRec's with the same loop induction variable being
1844    // multiplied together.  If so, we can fold them.
1845    for (unsigned OtherIdx = Idx+1;
1846         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1847      if (OtherIdx != Idx) {
1848        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1849        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1850          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
1851          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1852          const SCEV *NewStart = getMulExpr(F->getStart(),
1853                                            G->getStart());
1854          const SCEV *B = F->getStepRecurrence(*this);
1855          const SCEV *D = G->getStepRecurrence(*this);
1856          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
1857                                           getMulExpr(G, B),
1858                                           getMulExpr(B, D));
1859          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
1860                                                F->getLoop());
1861          if (Ops.size() == 2) return NewAddRec;
1862
1863          Ops.erase(Ops.begin()+Idx);
1864          Ops.erase(Ops.begin()+OtherIdx-1);
1865          Ops.push_back(NewAddRec);
1866          return getMulExpr(Ops);
1867        }
1868      }
1869
1870    // Otherwise couldn't fold anything into this recurrence.  Move on to the
1871    // next one.
1872  }
1873
1874  // Okay, it looks like we really DO need a mul expr.  Check to see if we
1875  // already have one, otherwise create a new one.
1876  FoldingSetNodeID ID;
1877  ID.AddInteger(scMulExpr);
1878  ID.AddInteger(Ops.size());
1879  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1880    ID.AddPointer(Ops[i]);
1881  void *IP = 0;
1882  SCEVMulExpr *S =
1883    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1884  if (!S) {
1885    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1886    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1887    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
1888                                        O, Ops.size());
1889    UniqueSCEVs.InsertNode(S, IP);
1890  }
1891  if (HasNUW) S->setHasNoUnsignedWrap(true);
1892  if (HasNSW) S->setHasNoSignedWrap(true);
1893  return S;
1894}
1895
1896/// getUDivExpr - Get a canonical unsigned division expression, or something
1897/// simpler if possible.
1898const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1899                                         const SCEV *RHS) {
1900  assert(getEffectiveSCEVType(LHS->getType()) ==
1901         getEffectiveSCEVType(RHS->getType()) &&
1902         "SCEVUDivExpr operand types don't match!");
1903
1904  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1905    if (RHSC->getValue()->equalsInt(1))
1906      return LHS;                               // X udiv 1 --> X
1907    // If the denominator is zero, the result of the udiv is undefined. Don't
1908    // try to analyze it, because the resolution chosen here may differ from
1909    // the resolution chosen in other parts of the compiler.
1910    if (!RHSC->getValue()->isZero()) {
1911      // Determine if the division can be folded into the operands of
1912      // its left-hand operand.
1913      // TODO: Generalize this to non-constants by using known-bits information.
1914      const Type *Ty = LHS->getType();
1915      unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1916      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
1917      // For non-power-of-two values, effectively round the value up to the
1918      // nearest power of two.
1919      if (!RHSC->getValue()->getValue().isPowerOf2())
1920        ++MaxShiftAmt;
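      // ExtTy provides extra headroom for the checks below: each fold only
      // fires when zero-extending the whole expression to ExtTy yields the
      // same SCEV as rebuilding it from zero-extended operands, which shows
      // the narrow computation does not wrap and the division distributes.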
1921      const IntegerType *ExtTy =
1922        IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
1923      // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
1924      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1925        if (const SCEVConstant *Step =
1926              dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1927          if (!Step->getValue()->getValue()
1928                .urem(RHSC->getValue()->getValue()) &&
1929              getZeroExtendExpr(AR, ExtTy) ==
1930              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1931                            getZeroExtendExpr(Step, ExtTy),
1932                            AR->getLoop())) {
1933            SmallVector<const SCEV *, 4> Operands;
1934            for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1935              Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1936            return getAddRecExpr(Operands, AR->getLoop());
1937          }
1938      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1939      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1940        SmallVector<const SCEV *, 4> Operands;
1941        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1942          Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1943        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1944          // Find an operand that's safely divisible.
1945          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1946            const SCEV *Op = M->getOperand(i);
1947            const SCEV *Div = getUDivExpr(Op, RHSC);
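            // Only fold if the division is exact, i.e. multiplying the
            // quotient back by the divisor reproduces the original operand.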
1948            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1949              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
1950                                                      M->op_end());
1951              Operands[i] = Div;
1952              return getMulExpr(Operands);
1953            }
1954          }
1955      }
1956      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1957      if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
1958        SmallVector<const SCEV *, 4> Operands;
1959        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1960          Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1961        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1962          Operands.clear();
1963          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1964            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1965            if (isa<SCEVUDivExpr>(Op) ||
1966                getMulExpr(Op, RHS) != A->getOperand(i))
1967              break;
1968            Operands.push_back(Op);
1969          }
1970          if (Operands.size() == A->getNumOperands())
1971            return getAddExpr(Operands);
1972        }
1973      }
1974
1975      // Fold if both operands are constant.
1976      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1977        Constant *LHSCV = LHSC->getValue();
1978        Constant *RHSCV = RHSC->getValue();
1979        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
1980                                                                   RHSCV)));
1981      }
1982    }
1983  }
1984
1985  FoldingSetNodeID ID;
1986  ID.AddInteger(scUDivExpr);
1987  ID.AddPointer(LHS);
1988  ID.AddPointer(RHS);
1989  void *IP = 0;
1990  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1991  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
1992                                             LHS, RHS);
1993  UniqueSCEVs.InsertNode(S, IP);
1994  return S;
1995}
1996
1997
1998/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1999/// Simplify the expression as much as possible.
2000const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
2001                                           const SCEV *Step, const Loop *L,
2002                                           bool HasNUW, bool HasNSW) {
2003  SmallVector<const SCEV *, 4> Operands;
2004  Operands.push_back(Start);
2005  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
2006    if (StepChrec->getLoop() == L) {
2007      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
2008      return getAddRecExpr(Operands, L);
2009    }
2010
2011  Operands.push_back(Step);
2012  return getAddRecExpr(Operands, L, HasNUW, HasNSW);
2013}
2014
2015/// getAddRecExpr - Get an add recurrence expression for the specified loop.
2016/// Simplify the expression as much as possible.
2017const SCEV *
2018ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2019                               const Loop *L,
2020                               bool HasNUW, bool HasNSW) {
2021  if (Operands.size() == 1) return Operands[0];
2022#ifndef NDEBUG
2023  const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2024  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2025    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2026           "SCEVAddRecExpr operand types don't match!");
2027#endif
2028
2029  if (Operands.back()->isZero()) {
2030    Operands.pop_back();
2031    return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0}  -->  X
2032  }
2033
2034  // It's tempting to call getMaxBackedgeTakenCount here and
2035  // use that information to infer NUW and NSW flags. However, computing a
2036  // BE count requires calling getAddRecExpr, so we may not yet have a
2037  // meaningful BE count at this point (and if we don't, we'd be stuck
2038  // with a SCEVCouldNotCompute as the cached BE count).
2039
2040  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
2041  if (!HasNUW && HasNSW) {
2042    bool All = true;
2043    for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2044      if (!isKnownNonNegative(Operands[i])) {
2045        All = false;
2046        break;
2047      }
2048    if (All) HasNUW = true;
2049  }
2050
2051  // Canonicalize nested AddRecs by nesting them in order of loop depth.
2052  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
2053    const Loop *NestedLoop = NestedAR->getLoop();
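    // Only swap the two recurrences if L is "outside" NestedLoop: either L
    // contains NestedLoop at a shallower depth, or the loops are disjoint
    // and L's header dominates NestedLoop's header.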
2054    if (L->contains(NestedLoop) ?
2055        (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
2056        (!NestedLoop->contains(L) &&
2057         DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
2058      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
2059                                                  NestedAR->op_end());
2060      Operands[0] = NestedAR->getStart();
2061      // AddRecs require their operands be loop-invariant with respect to their
2062      // loops. Don't perform this transformation if it would break this
2063      // requirement.
2064      bool AllInvariant = true;
2065      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2066        if (!Operands[i]->isLoopInvariant(L)) {
2067          AllInvariant = false;
2068          break;
2069        }
2070      if (AllInvariant) {
2071        NestedOperands[0] = getAddRecExpr(Operands, L);
2072        AllInvariant = true;
2073        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
2074          if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
2075            AllInvariant = false;
2076            break;
2077          }
2078        if (AllInvariant)
2079          // Ok, both add recurrences are valid after the transformation.
2080          return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW);
2081      }
2082      // Reset Operands to its original state.
2083      Operands[0] = NestedAR;
2084    }
2085  }
2086
2087  // Okay, it looks like we really DO need an addrec expr.  Check to see if we
2088  // already have one, otherwise create a new one.
2089  FoldingSetNodeID ID;
2090  ID.AddInteger(scAddRecExpr);
2091  ID.AddInteger(Operands.size());
2092  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2093    ID.AddPointer(Operands[i]);
2094  ID.AddPointer(L);
2095  void *IP = 0;
2096  SCEVAddRecExpr *S =
2097    static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2098  if (!S) {
2099    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2100    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
2101    S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2102                                           O, Operands.size(), L);
2103    UniqueSCEVs.InsertNode(S, IP);
2104  }
2105  if (HasNUW) S->setHasNoUnsignedWrap(true);
2106  if (HasNSW) S->setHasNoSignedWrap(true);
2107  return S;
2108}
2109
2110const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
2111                                         const SCEV *RHS) {
2112  SmallVector<const SCEV *, 2> Ops;
2113  Ops.push_back(LHS);
2114  Ops.push_back(RHS);
2115  return getSMaxExpr(Ops);
2116}
2117
2118const SCEV *
2119ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2120  assert(!Ops.empty() && "Cannot get empty smax!");
2121  if (Ops.size() == 1) return Ops[0];
2122#ifndef NDEBUG
2123  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2124  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2125    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2126           "SCEVSMaxExpr operand types don't match!");
2127#endif
2128
2129  // Sort by complexity; this groups all similar expression types together.
2130  GroupByComplexity(Ops, LI);
2131
2132  // If there are any constants, fold them together.
2133  unsigned Idx = 0;
2134  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2135    ++Idx;
2136    assert(Idx < Ops.size());
2137    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2138      // We found two constants, fold them together!
2139      ConstantInt *Fold = ConstantInt::get(getContext(),
2140                              APIntOps::smax(LHSC->getValue()->getValue(),
2141                                             RHSC->getValue()->getValue()));
2142      Ops[0] = getConstant(Fold);
2143      Ops.erase(Ops.begin()+1);  // Erase the folded element
2144      if (Ops.size() == 1) return Ops[0];
2145      LHSC = cast<SCEVConstant>(Ops[0]);
2146    }
2147
2148    // If we are left with a constant minimum-int, strip it off.
2149    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
2150      Ops.erase(Ops.begin());
2151      --Idx;
2152    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
2153      // If we have an smax with a constant maximum-int, it will always be
2154      // maximum-int.
2155      return Ops[0];
2156    }
2157
2158    if (Ops.size() == 1) return Ops[0];
2159  }
2160
2161  // Find the first SMax
2162  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
2163    ++Idx;
2164
2165  // Check to see if one of the operands is an SMax. If so, expand its operands
2166  // onto our operand list, and recurse to simplify.
2167  if (Idx < Ops.size()) {
2168    bool DeletedSMax = false;
2169    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
2170      Ops.erase(Ops.begin()+Idx);
2171      Ops.append(SMax->op_begin(), SMax->op_end());
2172      DeletedSMax = true;
2173    }
2174
2175    if (DeletedSMax)
2176      return getSMaxExpr(Ops);
2177  }
2178
2179  // Okay, check to see if the same value occurs in the operand list twice.  If
2180  // so, delete one.  Since we sorted the list, these values are required to
2181  // be adjacent.
2182  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2183    //  X smax Y smax Y  -->  X smax Y
2184    //  X smax Y         -->  X, if X is always greater than or equal to Y
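    //  X smax Y         -->  Y, if X is always less than or equal to Y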
2185    if (Ops[i] == Ops[i+1] ||
2186        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
2187      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2188      --i; --e;
2189    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
2190      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2191      --i; --e;
2192    }
2193
2194  if (Ops.size() == 1) return Ops[0];
2195
2196  assert(!Ops.empty() && "Reduced smax down to nothing!");
2197
2198  // Okay, it looks like we really DO need an smax expr.  Check to see if we
2199  // already have one, otherwise create a new one.
2200  FoldingSetNodeID ID;
2201  ID.AddInteger(scSMaxExpr);
2202  ID.AddInteger(Ops.size());
2203  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2204    ID.AddPointer(Ops[i]);
2205  void *IP = 0;
2206  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2207  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2208  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2209  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
2210                                             O, Ops.size());
2211  UniqueSCEVs.InsertNode(S, IP);
2212  return S;
2213}
2214
2215const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
2216                                         const SCEV *RHS) {
2217  SmallVector<const SCEV *, 2> Ops;
2218  Ops.push_back(LHS);
2219  Ops.push_back(RHS);
2220  return getUMaxExpr(Ops);
2221}
2222
2223const SCEV *
2224ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2225  assert(!Ops.empty() && "Cannot get empty umax!");
2226  if (Ops.size() == 1) return Ops[0];
2227#ifndef NDEBUG
2228  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2229  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2230    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2231           "SCEVUMaxExpr operand types don't match!");
2232#endif
2233
2234  // Sort by complexity; this groups all similar expression types together.
2235  GroupByComplexity(Ops, LI);
2236
2237  // If there are any constants, fold them together.
2238  unsigned Idx = 0;
2239  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2240    ++Idx;
2241    assert(Idx < Ops.size());
2242    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2243      // We found two constants, fold them together!
2244      ConstantInt *Fold = ConstantInt::get(getContext(),
2245                              APIntOps::umax(LHSC->getValue()->getValue(),
2246                                             RHSC->getValue()->getValue()));
2247      Ops[0] = getConstant(Fold);
2248      Ops.erase(Ops.begin()+1);  // Erase the folded element
2249      if (Ops.size() == 1) return Ops[0];
2250      LHSC = cast<SCEVConstant>(Ops[0]);
2251    }
2252
2253    // If we are left with a constant minimum-int, strip it off.
2254    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
2255      Ops.erase(Ops.begin());
2256      --Idx;
2257    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
2258      // If we have an umax with a constant maximum-int, it will always be
2259      // maximum-int.
2260      return Ops[0];
2261    }
2262
2263    if (Ops.size() == 1) return Ops[0];
2264  }
2265
2266  // Find the first UMax
2267  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
2268    ++Idx;
2269
2270  // Check to see if one of the operands is a UMax. If so, expand its operands
2271  // onto our operand list, and recurse to simplify.
2272  if (Idx < Ops.size()) {
2273    bool DeletedUMax = false;
2274    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
2275      Ops.erase(Ops.begin()+Idx);
2276      Ops.append(UMax->op_begin(), UMax->op_end());
2277      DeletedUMax = true;
2278    }
2279
2280    if (DeletedUMax)
2281      return getUMaxExpr(Ops);
2282  }
2283
2284  // Okay, check to see if the same value occurs in the operand list twice.  If
2285  // so, delete one.  Since we sorted the list, these values are required to
2286  // be adjacent.
2287  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2288    //  X umax Y umax Y  -->  X umax Y
2289    //  X umax Y         -->  X, if X is always greater than or equal to Y
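    //  X umax Y         -->  Y, if X is always less than or equal to Y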
2290    if (Ops[i] == Ops[i+1] ||
2291        isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
2292      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2293      --i; --e;
2294    } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
2295      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2296      --i; --e;
2297    }
2298
2299  if (Ops.size() == 1) return Ops[0];
2300
2301  assert(!Ops.empty() && "Reduced umax down to nothing!");
2302
2303  // Okay, it looks like we really DO need a umax expr.  Check to see if we
2304  // already have one, otherwise create a new one.
2305  FoldingSetNodeID ID;
2306  ID.AddInteger(scUMaxExpr);
2307  ID.AddInteger(Ops.size());
2308  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2309    ID.AddPointer(Ops[i]);
2310  void *IP = 0;
2311  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2312  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2313  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2314  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
2315                                             O, Ops.size());
2316  UniqueSCEVs.InsertNode(S, IP);
2317  return S;
2318}
2319
2320const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
2321                                         const SCEV *RHS) {
2322  // ~smax(~x, ~y) == smin(x, y).
2323  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2324}
2325
2326const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2327                                         const SCEV *RHS) {
2328  // ~umax(~x, ~y) == umin(x, y)
2329  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2330}
2331
2332const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
2333  // If we have TargetData, we can bypass creating a target-independent
2334  // constant expression and then folding it back into a ConstantInt.
2335  // This is just a compile-time optimization.
2336  if (TD)
2337    return getConstant(TD->getIntPtrType(getContext()),
2338                       TD->getTypeAllocSize(AllocTy));
2339
2340  Constant *C = ConstantExpr::getSizeOf(AllocTy);
2341  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2342    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2343      C = Folded;
2344  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2345  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2346}
2347
2348const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
2349  Constant *C = ConstantExpr::getAlignOf(AllocTy);
2350  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2351    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2352      C = Folded;
2353  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2354  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2355}
2356
2357const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
2358                                             unsigned FieldNo) {
2359  // If we have TargetData, we can bypass creating a target-independent
2360  // constant expression and then folding it back into a ConstantInt.
2361  // This is just a compile-time optimization.
2362  if (TD)
2363    return getConstant(TD->getIntPtrType(getContext()),
2364                       TD->getStructLayout(STy)->getElementOffset(FieldNo));
2365
2366  Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
2367  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2368    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2369      C = Folded;
2370  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
2371  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2372}
2373
2374const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
2375                                             Constant *FieldNo) {
2376  Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
2377  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2378    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2379      C = Folded;
2380  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
2381  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2382}
2383
2384const SCEV *ScalarEvolution::getUnknown(Value *V) {
2385  // Don't attempt to do anything other than create a SCEVUnknown object
2386  // here.  createSCEV only calls getUnknown after checking for all other
2387  // interesting possibilities, and any other code that calls getUnknown
2388  // is doing so in order to hide a value from SCEV canonicalization.
2389
2390  FoldingSetNodeID ID;
2391  ID.AddInteger(scUnknown);
2392  ID.AddPointer(V);
2393  void *IP = 0;
2394  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
2395    assert(cast<SCEVUnknown>(S)->getValue() == V &&
2396           "Stale SCEVUnknown in uniquing map!");
2397    return S;
2398  }
2399  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
2400                                            FirstUnknown);
2401  FirstUnknown = cast<SCEVUnknown>(S);
2402  UniqueSCEVs.InsertNode(S, IP);
2403  return S;
2404}
2405
2406//===----------------------------------------------------------------------===//
2407//            Basic SCEV Analysis and PHI Idiom Recognition Code
2408//
2409
2410/// isSCEVable - Test if values of the given type are analyzable within
2411/// the SCEV framework. This includes integer types and pointer types;
2412/// pointer types are analyzable even without target-specific information,
2413/// in which case a conservative pointer size is assumed.
2414bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2415  // Integers and pointers are always SCEVable.
2416  return Ty->isIntegerTy() || Ty->isPointerTy();
2417}
2418
2419/// getTypeSizeInBits - Return the size in bits of the specified type,
2420/// for which isSCEVable must return true.
2421uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2422  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2423
2424  // If we have a TargetData, use it!
2425  if (TD)
2426    return TD->getTypeSizeInBits(Ty);
2427
2428  // Integer types have fixed sizes.
2429  if (Ty->isIntegerTy())
2430    return Ty->getPrimitiveSizeInBits();
2431
2432  // The only other supported type is pointer. Without TargetData,
2433  // conservatively assume pointers are 64-bit.
2434  assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
2435  return 64;
2436}
2437
2438/// getEffectiveSCEVType - Return a type with the same bitwidth as
2439/// the given type and which represents how SCEV will treat the given
2440/// type, for which isSCEVable must return true. For pointer types,
2441/// this is the pointer-sized integer type.
2442const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2443  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2444
2445  if (Ty->isIntegerTy())
2446    return Ty;
2447
2448  // The only other supported type is pointer.
2449  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
2450  if (TD) return TD->getIntPtrType(getContext());
2451
2452  // Without TargetData, conservatively assume pointers are 64-bit.
2453  return Type::getInt64Ty(getContext());
2454}
2455
2456const SCEV *ScalarEvolution::getCouldNotCompute() {
2457  return &CouldNotCompute;
2458}
2459
2460/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2461/// expression and create a new one.
2462const SCEV *ScalarEvolution::getSCEV(Value *V) {
2463  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2464
2465  std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
2466  if (I != Scalars.end()) return I->second;
2467  const SCEV *S = createSCEV(V);
2468  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2469  return S;
2470}
2471
2472/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2473///
2474const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2475  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2476    return getConstant(
2477               cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2478
2479  const Type *Ty = V->getType();
2480  Ty = getEffectiveSCEVType(Ty);
2481  return getMulExpr(V,
2482                  getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
2483}
2484
2485/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2486const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2487  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2488    return getConstant(
2489                cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2490
2491  const Type *Ty = V->getType();
2492  Ty = getEffectiveSCEVType(Ty);
2493  const SCEV *AllOnes =
2494                   getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
2495  return getMinusSCEV(AllOnes, V);
2496}
2497
2498/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2499///
2500const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2501                                          const SCEV *RHS) {
2502  // Fast path: X - X --> 0.
2503  if (LHS == RHS)
2504    return getConstant(LHS->getType(), 0);
2505
2506  // X - Y --> X + -Y
2507  return getAddExpr(LHS, getNegativeSCEV(RHS));
2508}
2509
2510/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2511/// input value to the specified type.  If the type must be extended, it is zero
2512/// extended.
2513const SCEV *
2514ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2515                                         const Type *Ty) {
2516  const Type *SrcTy = V->getType();
2517  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2518         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2519         "Cannot truncate or zero extend with non-integer arguments!");
2520  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2521    return V;  // No conversion
2522  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2523    return getTruncateExpr(V, Ty);
2524  return getZeroExtendExpr(V, Ty);
2525}
2526
2527/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2528/// input value to the specified type.  If the type must be extended, it is sign
2529/// extended.
2530const SCEV *
2531ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2532                                         const Type *Ty) {
2533  const Type *SrcTy = V->getType();
2534  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2535         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2536         "Cannot truncate or sign extend with non-integer arguments!");
2537  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2538    return V;  // No conversion
2539  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2540    return getTruncateExpr(V, Ty);
2541  return getSignExtendExpr(V, Ty);
2542}
2543
2544/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2545/// input value to the specified type.  If the type must be extended, it is zero
2546/// extended.  The conversion must not be narrowing.
2547const SCEV *
2548ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2549  const Type *SrcTy = V->getType();
2550  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2551         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2552         "Cannot noop or zero extend with non-integer arguments!");
2553  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2554         "getNoopOrZeroExtend cannot truncate!");
2555  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2556    return V;  // No conversion
2557  return getZeroExtendExpr(V, Ty);
2558}
2559
2560/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2561/// input value to the specified type.  If the type must be extended, it is sign
2562/// extended.  The conversion must not be narrowing.
2563const SCEV *
2564ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2565  const Type *SrcTy = V->getType();
2566  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2567         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2568         "Cannot noop or sign extend with non-integer arguments!");
2569  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2570         "getNoopOrSignExtend cannot truncate!");
2571  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2572    return V;  // No conversion
2573  return getSignExtendExpr(V, Ty);
2574}
2575
2576/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2577/// the input value to the specified type. If the type must be extended,
2578/// it is extended with unspecified bits. The conversion must not be
2579/// narrowing.
2580const SCEV *
2581ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2582  const Type *SrcTy = V->getType();
2583  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2584         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2585         "Cannot noop or any extend with non-integer arguments!");
2586  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2587         "getNoopOrAnyExtend cannot truncate!");
2588  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2589    return V;  // No conversion
2590  return getAnyExtendExpr(V, Ty);
2591}
2592
2593/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2594/// input value to the specified type.  The conversion must not be widening.
2595const SCEV *
2596ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2597  const Type *SrcTy = V->getType();
2598  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2599         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2600         "Cannot truncate or noop with non-integer arguments!");
2601  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2602         "getTruncateOrNoop cannot extend!");
2603  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2604    return V;  // No conversion
2605  return getTruncateExpr(V, Ty);
2606}
2607
2608/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2609/// the types using zero-extension, and then perform a umax operation
2610/// with them.
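/// For example (illustrative, with hypothetical values), the umax of an i32
/// %a and an i64 %b is modeled as umax(zext %a to i64, %b).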
2611const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2612                                                        const SCEV *RHS) {
2613  const SCEV *PromotedLHS = LHS;
2614  const SCEV *PromotedRHS = RHS;
2615
2616  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2617    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2618  else
2619    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2620
2621  return getUMaxExpr(PromotedLHS, PromotedRHS);
2622}
2623
2624/// getUMinFromMismatchedTypes - Promote the operands to the wider of
2625/// the types using zero-extension, and then perform a umin operation
2626/// with them.
2627const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2628                                                        const SCEV *RHS) {
2629  const SCEV *PromotedLHS = LHS;
2630  const SCEV *PromotedRHS = RHS;
2631
2632  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2633    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2634  else
2635    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2636
2637  return getUMinExpr(PromotedLHS, PromotedRHS);
2638}
2639
2640/// PushDefUseChildren - Push users of the given Instruction
2641/// onto the given Worklist.
2642static void
2643PushDefUseChildren(Instruction *I,
2644                   SmallVectorImpl<Instruction *> &Worklist) {
2645  // Push the def-use children onto the Worklist stack.
2646  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2647       UI != UE; ++UI)
2648    Worklist.push_back(cast<Instruction>(*UI));
2649}
2650
2651/// ForgetSymbolicName - This looks up computed SCEV values for all
2652/// instructions that depend on the given instruction and removes them from
2653/// the Scalars map if they reference SymName. This is used during PHI
2654/// resolution.
2655void
2656ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
2657  SmallVector<Instruction *, 16> Worklist;
2658  PushDefUseChildren(PN, Worklist);
2659
2660  SmallPtrSet<Instruction *, 8> Visited;
2661  Visited.insert(PN);
2662  while (!Worklist.empty()) {
2663    Instruction *I = Worklist.pop_back_val();
2664    if (!Visited.insert(I)) continue;
2665
2666    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
2667      Scalars.find(static_cast<Value *>(I));
2668    if (It != Scalars.end()) {
2669      // Short-circuit the def-use traversal if the symbolic name
2670      // ceases to appear in expressions.
2671      if (It->second != SymName && !It->second->hasOperand(SymName))
2672        continue;
2673
2674      // SCEVUnknown for a PHI either means that it has an unrecognized
2675      // structure, it's a PHI that's in the progress of being computed
2676      // structure, it's a PHI that's in the process of being computed
2677      // additional loop trip count information isn't going to change anything.
2678      // In the second case, createNodeForPHI will perform the necessary
2679      // updates on its own when it gets to that point. In the third, we do
2680      // want to forget the SCEVUnknown.
2681      if (!isa<PHINode>(I) ||
2682          !isa<SCEVUnknown>(It->second) ||
2683          (I != PN && It->second == SymName)) {
2684        ValuesAtScopes.erase(It->second);
2685        Scalars.erase(It);
2686      }
2687    }
2688
2689    PushDefUseChildren(I, Worklist);
2690  }
2691}
2692
2693/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2694/// a loop header, making it a potential recurrence, or it doesn't.
2695///
2696const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2697  if (const Loop *L = LI->getLoopFor(PN->getParent()))
2698    if (L->getHeader() == PN->getParent()) {
2699      // The loop may have multiple entrances or multiple exits; we can analyze
2700      // this phi as an addrec if it has a unique entry value and a unique
2701      // backedge value.
2702      Value *BEValueV = 0, *StartValueV = 0;
2703      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
2704        Value *V = PN->getIncomingValue(i);
2705        if (L->contains(PN->getIncomingBlock(i))) {
2706          if (!BEValueV) {
2707            BEValueV = V;
2708          } else if (BEValueV != V) {
2709            BEValueV = 0;
2710            break;
2711          }
2712        } else if (!StartValueV) {
2713          StartValueV = V;
2714        } else if (StartValueV != V) {
2715          StartValueV = 0;
2716          break;
2717        }
2718      }
2719      if (BEValueV && StartValueV) {
2720        // While we are analyzing this PHI node, handle its value symbolically.
2721        const SCEV *SymbolicName = getUnknown(PN);
2722        assert(Scalars.find(PN) == Scalars.end() &&
2723               "PHI node already processed?");
2724        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2725
2726        // Using this symbolic name for the PHI, analyze the value coming around
2727        // the back-edge.
2728        const SCEV *BEValue = getSCEV(BEValueV);
2729
2730        // NOTE: If BEValue is loop invariant, we know that the PHI node just
2731        // has a special value for the first iteration of the loop.
2732
2733        // If the value coming around the backedge is an add with the symbolic
2734        // value we just inserted, then we found a simple induction variable!
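        // For example (an illustrative sketch with hypothetical values), given
        //   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
        //   %iv.next = add i32 %iv, 1
        // BEValue is (1 + %iv) with %iv still symbolic; the symbolic operand
        // is stripped out below, leaving Accum = 1, and the PHI becomes the
        // recurrence {0,+,1}<%loop>.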
2735        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2736          // If there is a single occurrence of the symbolic value, replace it
2737          // with a recurrence.
2738          unsigned FoundIndex = Add->getNumOperands();
2739          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2740            if (Add->getOperand(i) == SymbolicName)
2741              if (FoundIndex == e) {
2742                FoundIndex = i;
2743                break;
2744              }
2745
2746          if (FoundIndex != Add->getNumOperands()) {
2747            // Create an add with everything but the specified operand.
2748            SmallVector<const SCEV *, 8> Ops;
2749            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2750              if (i != FoundIndex)
2751                Ops.push_back(Add->getOperand(i));
2752            const SCEV *Accum = getAddExpr(Ops);
2753
2754            // This is not a valid addrec if the step amount is varying each
2755            // loop iteration, but is not itself an addrec in this loop.
2756            if (Accum->isLoopInvariant(L) ||
2757                (isa<SCEVAddRecExpr>(Accum) &&
2758                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2759              bool HasNUW = false;
2760              bool HasNSW = false;
2761
2762              // If the increment doesn't overflow, then neither the addrec nor
2763              // the post-increment will overflow.
2764              if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
2765                if (OBO->hasNoUnsignedWrap())
2766                  HasNUW = true;
2767                if (OBO->hasNoSignedWrap())
2768                  HasNSW = true;
2769              }
2770
2771              const SCEV *StartVal = getSCEV(StartValueV);
2772              const SCEV *PHISCEV =
2773                getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);
2774
2775              // Since the no-wrap flags are on the increment, they apply to the
2776              // post-incremented value as well.
2777              if (Accum->isLoopInvariant(L))
2778                (void)getAddRecExpr(getAddExpr(StartVal, Accum),
2779                                    Accum, L, HasNUW, HasNSW);
2780
2781              // Okay, for the entire analysis of this edge we assumed the PHI
2782              // to be symbolic.  We now need to go back and purge all of the
2783              // entries for the scalars that use the symbolic expression.
2784              ForgetSymbolicName(PN, SymbolicName);
2785              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2786              return PHISCEV;
2787            }
2788          }
2789        } else if (const SCEVAddRecExpr *AddRec =
2790                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
2791          // Otherwise, this could be a loop like this:
2792          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2793          // In this case, j = {1,+,1}  and BEValue is j.
2794          // Because the other in-value of i (0) fits the evolution of BEValue,
2795          // i really is an addrec evolution.
2796          if (AddRec->getLoop() == L && AddRec->isAffine()) {
2797            const SCEV *StartVal = getSCEV(StartValueV);
2798
2799            // If StartVal = j.start - j.stride, we can use StartVal as the
2800            // initial value of the addrec evolution.
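            // In the example above, j = {1,+,1}, so StartVal = 0 = 1 - 1 and
            // i becomes the addrec {0,+,1} for this loop.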
2801            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2802                                         AddRec->getOperand(1))) {
2803              const SCEV *PHISCEV =
2804                 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2805
2806              // Okay, for the entire analysis of this edge we assumed the PHI
2807              // to be symbolic.  We now need to go back and purge all of the
2808              // entries for the scalars that use the symbolic expression.
2809              ForgetSymbolicName(PN, SymbolicName);
2810              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2811              return PHISCEV;
2812            }
2813          }
2814        }
2815      }
2816    }
2817
2818  // If the PHI has a single incoming value, follow that value, unless the
2819  // PHI's incoming blocks are in a different loop, in which case doing so
2820  // risks breaking LCSSA form. Instcombine would normally zap these, but
2821  // it doesn't have DominatorTree information, so it may miss cases.
2822  if (Value *V = PN->hasConstantValue(DT)) {
2823    bool AllSameLoop = true;
2824    Loop *PNLoop = LI->getLoopFor(PN->getParent());
2825    for (size_t i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
2826      if (LI->getLoopFor(PN->getIncomingBlock(i)) != PNLoop) {
2827        AllSameLoop = false;
2828        break;
2829      }
2830    if (AllSameLoop)
2831      return getSCEV(V);
2832  }
2833
2834  // If it's not a loop phi, we can't handle it yet.
2835  return getUnknown(PN);
2836}
2837
2838/// createNodeForGEP - Expand GEP instructions into add and multiply
2839/// operations. This allows them to be analyzed by regular SCEV code.
2840///
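/// For example (an illustrative sketch with hypothetical types), a GEP such as
///   getelementptr %struct.S* %p, i64 %i, i32 1
/// is modeled as %p + %i * sizeof(%struct.S) + offsetof(%struct.S, field 1),
/// with the array index sign-extended to the pointer-width integer type.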
2841const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
2842
2843  // Don't blindly transfer the inbounds flag from the GEP instruction to the
2844  // Add expression, because the Instruction may be guarded by control flow
2845  // and the no-overflow bits may not be valid for the expression in any
2846  // context.
2847
2848  const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
2849  Value *Base = GEP->getOperand(0);
2850  // Don't attempt to analyze GEPs over unsized objects.
2851  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2852    return getUnknown(GEP);
2853  const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
2854  gep_type_iterator GTI = gep_type_begin(GEP);
2855  for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
2856                                      E = GEP->op_end();
2857       I != E; ++I) {
2858    Value *Index = *I;
2859    // Compute the (potentially symbolic) offset in bytes for this index.
2860    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2861      // For a struct, add the member offset.
2862      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2863      const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
2864
2865      // Add the field offset to the running total offset.
2866      TotalOffset = getAddExpr(TotalOffset, FieldOffset);
2867    } else {
2868      // For an array, add the element offset, explicitly scaled.
2869      const SCEV *ElementSize = getSizeOfExpr(*GTI);
2870      const SCEV *IndexS = getSCEV(Index);
2871      // Getelementptr indices are signed.
2872      IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
2873
2874      // Multiply the index by the element size to compute the element offset.
2875      const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize);
2876
2877      // Add the element offset to the running total offset.
2878      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2879    }
2880  }
2881
2882  // Get the SCEV for the GEP base.
2883  const SCEV *BaseS = getSCEV(Base);
2884
2885  // Add the total offset from all the GEP indices to the base.
2886  return getAddExpr(BaseS, TotalOffset);
2887}
2888
2889/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2890/// guaranteed to end in (at every loop iteration).  It is, at the same time,
2891/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2892/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
2893uint32_t
2894ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2895  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2896    return C->getValue()->getValue().countTrailingZeros();
2897
2898  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2899    return std::min(GetMinTrailingZeros(T->getOperand()),
2900                    (uint32_t)getTypeSizeInBits(T->getType()));
2901
2902  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2903    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2904    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2905             getTypeSizeInBits(E->getType()) : OpRes;
2906  }
2907
2908  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2909    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2910    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2911             getTypeSizeInBits(E->getType()) : OpRes;
2912  }
2913
2914  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2915    // The result is the min of all operands' results.
2916    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2917    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2918      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2919    return MinOpRes;
2920  }
2921
2922  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2923    // The result is the sum of all operands' results.
2924    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2925    uint32_t BitWidth = getTypeSizeInBits(M->getType());
2926    for (unsigned i = 1, e = M->getNumOperands();
2927         SumOpRes != BitWidth && i != e; ++i)
2928      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2929                          BitWidth);
2930    return SumOpRes;
2931  }
2932
2933  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2934    // The result is the min of all operands' results.
2935    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2936    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2937      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2938    return MinOpRes;
2939  }
2940
2941  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2942    // The result is the min of all operands' results.
2943    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2944    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2945      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2946    return MinOpRes;
2947  }
2948
2949  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2950    // The result is the min of all operands' results.
2951    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2952    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2953      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2954    return MinOpRes;
2955  }
2956
2957  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2958    // For a SCEVUnknown, ask ValueTracking.
2959    unsigned BitWidth = getTypeSizeInBits(U->getType());
2960    APInt Mask = APInt::getAllOnesValue(BitWidth);
2961    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2962    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
2963    return Zeros.countTrailingOnes();
2964  }
2965
2966  // SCEVUDivExpr
2967  return 0;
2968}
2969
2970/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
2971///
2972ConstantRange
2973ScalarEvolution::getUnsignedRange(const SCEV *S) {
2974
2975  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2976    return ConstantRange(C->getValue()->getValue());
2977
2978  unsigned BitWidth = getTypeSizeInBits(S->getType());
2979  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
2980
2981  // If the value has known zeros, the maximum unsigned value will have those
2982  // known zeros as well.
2983  uint32_t TZ = GetMinTrailingZeros(S);
2984  if (TZ != 0)
2985    ConservativeResult =
2986      ConstantRange(APInt::getMinValue(BitWidth),
2987                    APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
2988
2989  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2990    ConstantRange X = getUnsignedRange(Add->getOperand(0));
2991    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2992      X = X.add(getUnsignedRange(Add->getOperand(i)));
2993    return ConservativeResult.intersectWith(X);
2994  }
2995
2996  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2997    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
2998    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2999      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
3000    return ConservativeResult.intersectWith(X);
3001  }
3002
3003  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3004    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
3005    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3006      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
3007    return ConservativeResult.intersectWith(X);
3008  }
3009
3010  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3011    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
3012    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3013      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
3014    return ConservativeResult.intersectWith(X);
3015  }
3016
3017  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3018    ConstantRange X = getUnsignedRange(UDiv->getLHS());
3019    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
3020    return ConservativeResult.intersectWith(X.udiv(Y));
3021  }
3022
3023  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3024    ConstantRange X = getUnsignedRange(ZExt->getOperand());
3025    return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
3026  }
3027
3028  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3029    ConstantRange X = getUnsignedRange(SExt->getOperand());
3030    return ConservativeResult.intersectWith(X.signExtend(BitWidth));
3031  }
3032
3033  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3034    ConstantRange X = getUnsignedRange(Trunc->getOperand());
3035    return ConservativeResult.intersectWith(X.truncate(BitWidth));
3036  }
3037
3038  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3039    // If there's no unsigned wrap, the value will never be less than its
3040    // initial value.
3041    if (AddRec->hasNoUnsignedWrap())
3042      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
3043        if (!C->getValue()->isZero())
3044          ConservativeResult =
3045            ConservativeResult.intersectWith(
3046              ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
3047
3048    // TODO: non-affine addrec
3049    if (AddRec->isAffine()) {
3050      const Type *Ty = AddRec->getType();
3051      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3052      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3053          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3054        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3055
3056        const SCEV *Start = AddRec->getStart();
3057        const SCEV *Step = AddRec->getStepRecurrence(*this);
3058
3059        ConstantRange StartRange = getUnsignedRange(Start);
3060        ConstantRange StepRange = getSignedRange(Step);
3061        ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3062        ConstantRange EndRange =
3063          StartRange.add(MaxBECountRange.multiply(StepRange));
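        // For example (illustrative), for an addrec {0,+,2} over i8 in a loop
        // with a maximum backedge-taken count of 100, EndRange is {200} and
        // the code below yields the unsigned range [0,201).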
3064
3065        // Check for overflow. This must be done with ConstantRange arithmetic
3066        // because we could be called from within the ScalarEvolution overflow
3067        // checking code.
3068        ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
3069        ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3070        ConstantRange ExtMaxBECountRange =
3071          MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3072        ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
3073        if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3074            ExtEndRange)
3075          return ConservativeResult;
3076
3077        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
3078                                   EndRange.getUnsignedMin());
3079        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
3080                                   EndRange.getUnsignedMax());
3081        if (Min.isMinValue() && Max.isMaxValue())
3082          return ConservativeResult;
3083        return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
3084      }
3085    }
3086
3087    return ConservativeResult;
3088  }
3089
3090  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3091    // For a SCEVUnknown, ask ValueTracking.
3092    APInt Mask = APInt::getAllOnesValue(BitWidth);
3093    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
3094    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
3095    if (Ones == ~Zeros + 1)
3096      return ConservativeResult;
3097    return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
3098  }
3099
3100  return ConservativeResult;
3101}
3102
3103/// getSignedRange - Determine the signed range for a particular SCEV.
3104///
3105ConstantRange
3106ScalarEvolution::getSignedRange(const SCEV *S) {
3107
3108  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3109    return ConstantRange(C->getValue()->getValue());
3110
3111  unsigned BitWidth = getTypeSizeInBits(S->getType());
3112  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
3113
3114  // If the value has known zeros, the maximum signed value will have those
3115  // known zeros as well.
3116  uint32_t TZ = GetMinTrailingZeros(S);
3117  if (TZ != 0)
3118    ConservativeResult =
3119      ConstantRange(APInt::getSignedMinValue(BitWidth),
3120                    APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
3121
3122  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3123    ConstantRange X = getSignedRange(Add->getOperand(0));
3124    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
3125      X = X.add(getSignedRange(Add->getOperand(i)));
3126    return ConservativeResult.intersectWith(X);
3127  }
3128
3129  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3130    ConstantRange X = getSignedRange(Mul->getOperand(0));
3131    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3132      X = X.multiply(getSignedRange(Mul->getOperand(i)));
3133    return ConservativeResult.intersectWith(X);
3134  }
3135
3136  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3137    ConstantRange X = getSignedRange(SMax->getOperand(0));
3138    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3139      X = X.smax(getSignedRange(SMax->getOperand(i)));
3140    return ConservativeResult.intersectWith(X);
3141  }
3142
3143  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3144    ConstantRange X = getSignedRange(UMax->getOperand(0));
3145    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3146      X = X.umax(getSignedRange(UMax->getOperand(i)));
3147    return ConservativeResult.intersectWith(X);
3148  }
3149
3150  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3151    ConstantRange X = getSignedRange(UDiv->getLHS());
3152    ConstantRange Y = getSignedRange(UDiv->getRHS());
3153    return ConservativeResult.intersectWith(X.udiv(Y));
3154  }
3155
3156  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3157    ConstantRange X = getSignedRange(ZExt->getOperand());
3158    return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
3159  }
3160
3161  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3162    ConstantRange X = getSignedRange(SExt->getOperand());
3163    return ConservativeResult.intersectWith(X.signExtend(BitWidth));
3164  }
3165
3166  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3167    ConstantRange X = getSignedRange(Trunc->getOperand());
3168    return ConservativeResult.intersectWith(X.truncate(BitWidth));
3169  }
3170
3171  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3172    // If there's no signed wrap, and all the operands have the same sign or
3173    // zero, the value won't ever change sign.
3174    if (AddRec->hasNoSignedWrap()) {
3175      bool AllNonNeg = true;
3176      bool AllNonPos = true;
3177      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
3178        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
3179        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
3180      }
3181      if (AllNonNeg)
3182        ConservativeResult = ConservativeResult.intersectWith(
3183          ConstantRange(APInt(BitWidth, 0),
3184                        APInt::getSignedMinValue(BitWidth)));
3185      else if (AllNonPos)
3186        ConservativeResult = ConservativeResult.intersectWith(
3187          ConstantRange(APInt::getSignedMinValue(BitWidth),
3188                        APInt(BitWidth, 1)));
3189    }
3190
3191    // TODO: non-affine addrec
3192    if (AddRec->isAffine()) {
3193      const Type *Ty = AddRec->getType();
3194      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3195      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3196          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3197        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3198
3199        const SCEV *Start = AddRec->getStart();
3200        const SCEV *Step = AddRec->getStepRecurrence(*this);
3201
3202        ConstantRange StartRange = getSignedRange(Start);
3203        ConstantRange StepRange = getSignedRange(Step);
3204        ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3205        ConstantRange EndRange =
3206          StartRange.add(MaxBECountRange.multiply(StepRange));
3207
3208        // Check for overflow. This must be done with ConstantRange arithmetic
3209        // because we could be called from within the ScalarEvolution overflow
3210        // checking code.
3211        ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
3212        ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3213        ConstantRange ExtMaxBECountRange =
3214          MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3215        ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
3216        if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3217            ExtEndRange)
3218          return ConservativeResult;
3219
3220        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
3221                                   EndRange.getSignedMin());
3222        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
3223                                   EndRange.getSignedMax());
3224        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
3225          return ConservativeResult;
3226        return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
3227      }
3228    }
3229
3230    return ConservativeResult;
3231  }
3232
3233  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3234    // For a SCEVUnknown, ask ValueTracking.
3235    if (!U->getValue()->getType()->isIntegerTy() && !TD)
3236      return ConservativeResult;
3237    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
3238    if (NS == 1)
3239      return ConservativeResult;
3240    return ConservativeResult.intersectWith(
3241      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
3242                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1));
3243  }
3244
3245  return ConservativeResult;
3246}
3247
3248/// createSCEV - We know that there is no SCEV for the specified value.
3249/// Analyze the expression.
3250///
3251const SCEV *ScalarEvolution::createSCEV(Value *V) {
3252  if (!isSCEVable(V->getType()))
3253    return getUnknown(V);
3254
3255  unsigned Opcode = Instruction::UserOp1;
3256  if (Instruction *I = dyn_cast<Instruction>(V)) {
3257    Opcode = I->getOpcode();
3258
3259    // Don't attempt to analyze instructions in blocks that aren't
3260    // reachable. Such instructions don't matter, and they aren't required
3261    // to obey basic rules for definitions dominating uses which this
3262    // analysis depends on.
3263    if (!DT->isReachableFromEntry(I->getParent()))
3264      return getUnknown(V);
3265  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
3266    Opcode = CE->getOpcode();
3267  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
3268    return getConstant(CI);
3269  else if (isa<ConstantPointerNull>(V))
3270    return getConstant(V->getType(), 0);
3271  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
3272    return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
3273  else
3274    return getUnknown(V);
3275
3276  Operator *U = cast<Operator>(V);
3277  switch (Opcode) {
3278  case Instruction::Add: {
3279    // The simple thing to do would be to just call getSCEV on both operands
3280    // and call getAddExpr with the result. However if we're looking at a
3281    // bunch of things all added together, this can be quite inefficient,
3282    // because it leads to N-1 getAddExpr calls for N ultimate operands.
3283    // Instead, gather up all the operands and make a single getAddExpr call.
3284    // LLVM IR canonical form means we need only traverse the left operands.
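    // For example (illustrative), ((%a + %b) + %c) + %d is gathered into a
    // single getAddExpr call over %d, %c, %b, and %a rather than three
    // nested getAddExpr calls.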
3285    SmallVector<const SCEV *, 4> AddOps;
3286    AddOps.push_back(getSCEV(U->getOperand(1)));
3287    for (Value *Op = U->getOperand(0);
3288         Op->getValueID() == Instruction::Add + Value::InstructionVal;
3289         Op = U->getOperand(0)) {
3290      U = cast<Operator>(Op);
3291      AddOps.push_back(getSCEV(U->getOperand(1)));
3292    }
3293    AddOps.push_back(getSCEV(U->getOperand(0)));
3294    return getAddExpr(AddOps);
3295  }
3296  case Instruction::Mul: {
3297    // See the Add code above.
3298    SmallVector<const SCEV *, 4> MulOps;
3299    MulOps.push_back(getSCEV(U->getOperand(1)));
3300    for (Value *Op = U->getOperand(0);
3301         Op->getValueID() == Instruction::Mul + Value::InstructionVal;
3302         Op = U->getOperand(0)) {
3303      U = cast<Operator>(Op);
3304      MulOps.push_back(getSCEV(U->getOperand(1)));
3305    }
3306    MulOps.push_back(getSCEV(U->getOperand(0)));
3307    return getMulExpr(MulOps);
3308  }
3309  case Instruction::UDiv:
3310    return getUDivExpr(getSCEV(U->getOperand(0)),
3311                       getSCEV(U->getOperand(1)));
3312  case Instruction::Sub:
3313    return getMinusSCEV(getSCEV(U->getOperand(0)),
3314                        getSCEV(U->getOperand(1)));
3315  case Instruction::And:
3316    // For an expression like x&255 that merely masks off the high bits,
3317    // use zext(trunc(x)) as the SCEV expression.
3318    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3319      if (CI->isNullValue())
3320        return getSCEV(U->getOperand(1));
3321      if (CI->isAllOnesValue())
3322        return getSCEV(U->getOperand(0));
3323      const APInt &A = CI->getValue();
3324
3325      // Instcombine's ShrinkDemandedConstant may strip bits out of
3326      // constants, obscuring what would otherwise be a low-bits mask.
3327      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
3328      // knew about to reconstruct a low-bits mask value.
3329      unsigned LZ = A.countLeadingZeros();
3330      unsigned BitWidth = A.getBitWidth();
3331      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
3332      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3333      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
3334
3335      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
3336
3337      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
3338        return
3339          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
3340                                IntegerType::get(getContext(), BitWidth - LZ)),
3341                            U->getType());
3342    }
3343    break;
3344
3345  case Instruction::Or:
3346    // If the RHS of the Or is a constant, we may have something like:
3347    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
3348    // optimizations will transparently handle this case.
3349    //
3350    // In order for this transformation to be safe, the LHS must be of the
3351    // form X*(2^n) and the Or constant must be less than 2^n.
3352    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3353      const SCEV *LHS = getSCEV(U->getOperand(0));
3354      const APInt &CIVal = CI->getValue();
3355      if (GetMinTrailingZeros(LHS) >=
3356          (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
3357        // Build a plain add SCEV.
3358        const SCEV *S = getAddExpr(LHS, getSCEV(CI));
3359        // If the LHS of the add was an addrec and it has no-wrap flags,
3360        // transfer the no-wrap flags, since an or won't introduce a wrap.
3361        if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
3362          const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
3363          if (OldAR->hasNoUnsignedWrap())
3364            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true);
3365          if (OldAR->hasNoSignedWrap())
3366            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true);
3367        }
3368        return S;
3369      }
3370    }
3371    break;
3372  case Instruction::Xor:
3373    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3374      // If the RHS of the xor is a signbit, then this is just an add.
3375      // Instcombine turns add of signbit into xor as a strength reduction step.
3376      if (CI->getValue().isSignBit())
3377        return getAddExpr(getSCEV(U->getOperand(0)),
3378                          getSCEV(U->getOperand(1)));
3379
3380      // If the RHS of xor is -1, then this is a not operation.
3381      if (CI->isAllOnesValue())
3382        return getNotSCEV(getSCEV(U->getOperand(0)));
3383
3384      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
3385      // This is a variant of the check for xor with -1, and it handles
3386      // the case where instcombine has trimmed non-demanded bits out
3387      // of an xor with -1.
3388      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
3389        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
3390          if (BO->getOpcode() == Instruction::And &&
3391              LCI->getValue() == CI->getValue())
3392            if (const SCEVZeroExtendExpr *Z =
3393                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
3394              const Type *UTy = U->getType();
3395              const SCEV *Z0 = Z->getOperand();
3396              const Type *Z0Ty = Z0->getType();
3397              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
3398
3399              // If C is a low-bits mask, the zero extend is serving to
3400              // mask off the high bits. Complement the operand and
3401              // re-apply the zext.
3402              if (APIntOps::isMask(Z0TySize, CI->getValue()))
3403                return getZeroExtendExpr(getNotSCEV(Z0), UTy);
3404
3405              // If C is a single bit, it may be in the sign-bit position
3406              // before the zero-extend. In this case, represent the xor
3407              // using an add, which is equivalent, and re-apply the zext.
3408              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
3409              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3410                  Trunc.isSignBit())
3411                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3412                                         UTy);
3413            }
3414    }
3415    break;
3416
3417  case Instruction::Shl:
3418    // Turn shift left of a constant amount into a multiply.
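    // For example (illustrative), %x << 3 is modeled as %x * 8.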
3419    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3420      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3421
3422      // If the shift count is not less than the bitwidth, the result of
3423      // the shift is undefined. Don't try to analyze it, because the
3424      // resolution chosen here may differ from the resolution chosen in
3425      // other parts of the compiler.
3426      if (SA->getValue().uge(BitWidth))
3427        break;
3428
3429      Constant *X = ConstantInt::get(getContext(),
3430        APInt(BitWidth, 1).shl(SA->getZExtValue()));
3431      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3432    }
3433    break;
3434
3435  case Instruction::LShr:
3436    // Turn logical shift right of a constant into an unsigned divide.
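    // For example (illustrative), %x lshr 3 is modeled as %x /u 8.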
3437    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3438      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3439
3440      // If the shift count is not less than the bitwidth, the result of
3441      // the shift is undefined. Don't try to analyze it, because the
3442      // resolution chosen here may differ from the resolution chosen in
3443      // other parts of the compiler.
3444      if (SA->getValue().uge(BitWidth))
3445        break;
3446
3447      Constant *X = ConstantInt::get(getContext(),
3448        APInt(BitWidth, 1).shl(SA->getZExtValue()));
3449      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3450    }
3451    break;
3452
3453  case Instruction::AShr:
3454    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
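    // For example (illustrative), (%x << 24) ashr 24 on i32 is modeled as
    // sext(trunc %x to i8) to i32.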
3455    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3456      if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
3457        if (L->getOpcode() == Instruction::Shl &&
3458            L->getOperand(1) == U->getOperand(1)) {
3459          uint64_t BitWidth = getTypeSizeInBits(U->getType());
3460
3461          // If the shift count is not less than the bitwidth, the result of
3462          // the shift is undefined. Don't try to analyze it, because the
3463          // resolution chosen here may differ from the resolution chosen in
3464          // other parts of the compiler.
3465          if (CI->getValue().uge(BitWidth))
3466            break;
3467
3468          uint64_t Amt = BitWidth - CI->getZExtValue();
3469          if (Amt == BitWidth)
3470            return getSCEV(L->getOperand(0));       // shift by zero --> noop
3471          return
3472            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3473                                              IntegerType::get(getContext(),
3474                                                               Amt)),
3475                              U->getType());
3476        }
3477    break;
3478
3479  case Instruction::Trunc:
3480    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
3481
3482  case Instruction::ZExt:
3483    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3484
3485  case Instruction::SExt:
3486    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3487
3488  case Instruction::BitCast:
3489    // BitCasts are no-op casts so we just eliminate the cast.
3490    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
3491      return getSCEV(U->getOperand(0));
3492    break;
3493
3494  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
3495  // lead to pointer expressions which cannot safely be expanded to GEPs,
3496  // because ScalarEvolution doesn't respect the GEP aliasing rules when
3497  // simplifying integer expressions.
3498
3499  case Instruction::GetElementPtr:
3500    return createNodeForGEP(cast<GEPOperator>(U));
3501
3502  case Instruction::PHI:
3503    return createNodeForPHI(cast<PHINode>(U));
3504
3505  case Instruction::Select:
3506    // This could be a smax or umax that was lowered earlier.
3507    // Try to recover it.
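    // For example (illustrative), %c = icmp sgt i32 %a, %b followed by
    // select i1 %c, i32 %a, i32 %b is recognized as smax(%a, %b).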
3508    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
3509      Value *LHS = ICI->getOperand(0);
3510      Value *RHS = ICI->getOperand(1);
3511      switch (ICI->getPredicate()) {
3512      case ICmpInst::ICMP_SLT:
3513      case ICmpInst::ICMP_SLE:
3514        std::swap(LHS, RHS);
3515        // fall through
3516      case ICmpInst::ICMP_SGT:
3517      case ICmpInst::ICMP_SGE:
3518        // a >s b ? a+x : b+x  ->  smax(a, b)+x
3519        // a >s b ? b+x : a+x  ->  smin(a, b)+x
3520        if (LHS->getType() == U->getType()) {
3521          const SCEV *LS = getSCEV(LHS);
3522          const SCEV *RS = getSCEV(RHS);
3523          const SCEV *LA = getSCEV(U->getOperand(1));
3524          const SCEV *RA = getSCEV(U->getOperand(2));
3525          const SCEV *LDiff = getMinusSCEV(LA, LS);
3526          const SCEV *RDiff = getMinusSCEV(RA, RS);
3527          if (LDiff == RDiff)
3528            return getAddExpr(getSMaxExpr(LS, RS), LDiff);
3529          LDiff = getMinusSCEV(LA, RS);
3530          RDiff = getMinusSCEV(RA, LS);
3531          if (LDiff == RDiff)
3532            return getAddExpr(getSMinExpr(LS, RS), LDiff);
3533        }
3534        break;
3535      case ICmpInst::ICMP_ULT:
3536      case ICmpInst::ICMP_ULE:
3537        std::swap(LHS, RHS);
3538        // fall through
3539      case ICmpInst::ICMP_UGT:
3540      case ICmpInst::ICMP_UGE:
3541        // a >u b ? a+x : b+x  ->  umax(a, b)+x
3542        // a >u b ? b+x : a+x  ->  umin(a, b)+x
3543        if (LHS->getType() == U->getType()) {
3544          const SCEV *LS = getSCEV(LHS);
3545          const SCEV *RS = getSCEV(RHS);
3546          const SCEV *LA = getSCEV(U->getOperand(1));
3547          const SCEV *RA = getSCEV(U->getOperand(2));
3548          const SCEV *LDiff = getMinusSCEV(LA, LS);
3549          const SCEV *RDiff = getMinusSCEV(RA, RS);
3550          if (LDiff == RDiff)
3551            return getAddExpr(getUMaxExpr(LS, RS), LDiff);
3552          LDiff = getMinusSCEV(LA, RS);
3553          RDiff = getMinusSCEV(RA, LS);
3554          if (LDiff == RDiff)
3555            return getAddExpr(getUMinExpr(LS, RS), LDiff);
3556        }
3557        break;
3558      case ICmpInst::ICMP_NE:
3559        // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
3560        if (LHS->getType() == U->getType() &&
3561            isa<ConstantInt>(RHS) &&
3562            cast<ConstantInt>(RHS)->isZero()) {
3563          const SCEV *One = getConstant(LHS->getType(), 1);
3564          const SCEV *LS = getSCEV(LHS);
3565          const SCEV *LA = getSCEV(U->getOperand(1));
3566          const SCEV *RA = getSCEV(U->getOperand(2));
3567          const SCEV *LDiff = getMinusSCEV(LA, LS);
3568          const SCEV *RDiff = getMinusSCEV(RA, One);
3569          if (LDiff == RDiff)
3570            return getAddExpr(getUMaxExpr(One, LS), LDiff);
3571        }
3572        break;
3573      case ICmpInst::ICMP_EQ:
3574        // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
3575        if (LHS->getType() == U->getType() &&
3576            isa<ConstantInt>(RHS) &&
3577            cast<ConstantInt>(RHS)->isZero()) {
3578          const SCEV *One = getConstant(LHS->getType(), 1);
3579          const SCEV *LS = getSCEV(LHS);
3580          const SCEV *LA = getSCEV(U->getOperand(1));
3581          const SCEV *RA = getSCEV(U->getOperand(2));
3582          const SCEV *LDiff = getMinusSCEV(LA, One);
3583          const SCEV *RDiff = getMinusSCEV(RA, LS);
3584          if (LDiff == RDiff)
3585            return getAddExpr(getUMaxExpr(One, LS), LDiff);
3586        }
3587        break;
3588      default:
3589        break;
3590      }
3591    }
3592
3593  default: // We cannot analyze this expression.
3594    break;
3595  }
3596
3597  return getUnknown(V);
3598}
3599
3600
3601
3602//===----------------------------------------------------------------------===//
3603//                   Iteration Count Computation Code
3604//
3605
3606/// getBackedgeTakenCount - If the specified loop has a predictable
3607/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3608/// object. The backedge-taken count is the number of times the loop header
3609/// will be branched to from within the loop. This is one less than the
3610/// trip count of the loop, since it doesn't count the first iteration,
3611/// when the header is branched to from outside the loop.
3612///
3613/// Note that it is not valid to call this method on a loop without a
3614/// loop-invariant backedge-taken count (see
3615/// hasLoopInvariantBackedgeTakenCount).
3616///
3617const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3618  return getBackedgeTakenInfo(L).Exact;
3619}
3620
3621/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3622/// return the least SCEV value that is known never to be less than the
3623/// actual backedge taken count.
3624const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3625  return getBackedgeTakenInfo(L).Max;
3626}
3627
3628/// PushLoopPHIs - Push PHI nodes in the header of the given loop
3629/// onto the given Worklist.
3630static void
3631PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3632  BasicBlock *Header = L->getHeader();
3633
3634  // Push all Loop-header PHIs onto the Worklist stack.
3635  for (BasicBlock::iterator I = Header->begin();
3636       PHINode *PN = dyn_cast<PHINode>(I); ++I)
3637    Worklist.push_back(PN);
3638}
3639
3640const ScalarEvolution::BackedgeTakenInfo &
3641ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3642  // Initially insert a CouldNotCompute for this loop. If the insertion
3643  // succeeds, proceed to actually compute a backedge-taken count and
3644  // update the value. The temporary CouldNotCompute value tells SCEV
3645  // code elsewhere that it shouldn't attempt to request a new
3646  // backedge-taken count, which could result in infinite recursion.
3647  std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
3648    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3649  if (Pair.second) {
3650    BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
3651    if (BECount.Exact != getCouldNotCompute()) {
3652      assert(BECount.Exact->isLoopInvariant(L) &&
3653             BECount.Max->isLoopInvariant(L) &&
3654             "Computed backedge-taken count isn't loop invariant for loop!");
3655      ++NumTripCountsComputed;
3656
3657      // Update the value in the map.
3658      Pair.first->second = BECount;
3659    } else {
3660      if (BECount.Max != getCouldNotCompute())
3661        // Update the value in the map.
3662        Pair.first->second = BECount;
3663      if (isa<PHINode>(L->getHeader()->begin()))
3664        // Only count loops that have phi nodes as not being computable.
3665        ++NumTripCountsNotComputed;
3666    }
3667
3668    // Now that we know more about the trip count for this loop, forget any
3669    // existing SCEV values for PHI nodes in this loop since they are only
3670    // conservative estimates made without the benefit of trip count
3671    // information. This is similar to the code in forgetLoop, except that
3672    // it handles SCEVUnknown PHI nodes specially.
3673    if (BECount.hasAnyInfo()) {
3674      SmallVector<Instruction *, 16> Worklist;
3675      PushLoopPHIs(L, Worklist);
3676
3677      SmallPtrSet<Instruction *, 8> Visited;
3678      while (!Worklist.empty()) {
3679        Instruction *I = Worklist.pop_back_val();
3680        if (!Visited.insert(I)) continue;
3681
3682        std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3683          Scalars.find(static_cast<Value *>(I));
3684        if (It != Scalars.end()) {
3685          // SCEVUnknown for a PHI either means that it has an unrecognized
3686          // structure, or it's a PHI that's in the progress of being computed
3687          // structure, or it's a PHI that's in the process of being computed
3688          // by createNodeForPHI.  In the former case, additional loop trip
3689          // count information isn't going to change anything. In the latter
3690          // own when it gets to that point.
3691          if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
3692            ValuesAtScopes.erase(It->second);
3693            Scalars.erase(It);
3694          }
3695          if (PHINode *PN = dyn_cast<PHINode>(I))
3696            ConstantEvolutionLoopExitValue.erase(PN);
3697        }
3698
3699        PushDefUseChildren(I, Worklist);
3700      }
3701    }
3702  }
3703  return Pair.first->second;
3704}
3705
3706/// forgetLoop - This method should be called by the client when it has
3707/// changed a loop in a way that may effect ScalarEvolution's ability to
3708/// compute a trip count, or if the loop is deleted.
3709void ScalarEvolution::forgetLoop(const Loop *L) {
3710  // Drop any stored trip count value.
3711  BackedgeTakenCounts.erase(L);
3712
3713  // Drop information about expressions based on loop-header PHIs.
3714  SmallVector<Instruction *, 16> Worklist;
3715  PushLoopPHIs(L, Worklist);
3716
3717  SmallPtrSet<Instruction *, 8> Visited;
3718  while (!Worklist.empty()) {
3719    Instruction *I = Worklist.pop_back_val();
3720    if (!Visited.insert(I)) continue;
3721
3722    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3723      Scalars.find(static_cast<Value *>(I));
3724    if (It != Scalars.end()) {
3725      ValuesAtScopes.erase(It->second);
3726      Scalars.erase(It);
3727      if (PHINode *PN = dyn_cast<PHINode>(I))
3728        ConstantEvolutionLoopExitValue.erase(PN);
3729    }
3730
3731    PushDefUseChildren(I, Worklist);
3732  }
3733}
3734
3735/// forgetValue - This method should be called by the client when it has
3736/// changed a value in a way that may effect its value, or which may
3737/// disconnect it from a def-use chain linking it to a loop.
3738void ScalarEvolution::forgetValue(Value *V) {
3739  Instruction *I = dyn_cast<Instruction>(V);
3740  if (!I) return;
3741
3742  // Drop information about expressions based on loop-header PHIs.
3743  SmallVector<Instruction *, 16> Worklist;
3744  Worklist.push_back(I);
3745
3746  SmallPtrSet<Instruction *, 8> Visited;
3747  while (!Worklist.empty()) {
3748    I = Worklist.pop_back_val();
3749    if (!Visited.insert(I)) continue;
3750
3751    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3752      Scalars.find(static_cast<Value *>(I));
3753    if (It != Scalars.end()) {
3754      ValuesAtScopes.erase(It->second);
3755      Scalars.erase(It);
3756      if (PHINode *PN = dyn_cast<PHINode>(I))
3757        ConstantEvolutionLoopExitValue.erase(PN);
3758    }
3759
3760    PushDefUseChildren(I, Worklist);
3761  }
3762}
3763
3764/// ComputeBackedgeTakenCount - Compute the number of times the backedge
3765/// of the specified loop will execute.
3766ScalarEvolution::BackedgeTakenInfo
3767ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3768  SmallVector<BasicBlock *, 8> ExitingBlocks;
3769  L->getExitingBlocks(ExitingBlocks);
3770
3771  // Examine all exits and pick the most conservative values.
3772  const SCEV *BECount = getCouldNotCompute();
3773  const SCEV *MaxBECount = getCouldNotCompute();
3774  bool CouldNotComputeBECount = false;
3775  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3776    BackedgeTakenInfo NewBTI =
3777      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3778
3779    if (NewBTI.Exact == getCouldNotCompute()) {
3780      // We couldn't compute an exact value for this exit, so
3781      // we won't be able to compute an exact value for the loop.
3782      CouldNotComputeBECount = true;
3783      BECount = getCouldNotCompute();
3784    } else if (!CouldNotComputeBECount) {
3785      if (BECount == getCouldNotCompute())
3786        BECount = NewBTI.Exact;
3787      else
3788        BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3789    }
3790    if (MaxBECount == getCouldNotCompute())
3791      MaxBECount = NewBTI.Max;
3792    else if (NewBTI.Max != getCouldNotCompute())
3793      MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3794  }
3795
3796  return BackedgeTakenInfo(BECount, MaxBECount);
3797}
3798
3799/// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3800/// of the specified loop will execute if it exits via the specified block.
3801ScalarEvolution::BackedgeTakenInfo
3802ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3803                                                   BasicBlock *ExitingBlock) {
3804
3805  // Okay, we've chosen an exiting block.  See what condition causes us to
3806  // exit at this block.
3807  //
3808  // FIXME: we should be able to handle switch instructions (with a single exit)
3809  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3810  if (ExitBr == 0) return getCouldNotCompute();
3811  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3812
3813  // At this point, we know we have a conditional branch that determines whether
3814  // the loop is exited.  However, we don't know if the branch is executed each
3815  // time through the loop.  If not, then the execution count of the branch will
3816  // not be equal to the trip count of the loop.
3817  //
3818  // Currently we check for this by checking to see if the Exit branch goes to
3819  // the loop header.  If so, we know it will always execute the same number of
3820  // times as the loop.  We also handle the case where the exit block *is* the
3821  // loop header.  This is common for un-rotated loops.
3822  //
3823  // If both of those tests fail, walk up the unique predecessor chain to the
3824  // header, stopping if there is an edge that doesn't exit the loop. If the
3825  // header is reached, the execution count of the branch will be equal to the
3826  // trip count of the loop.
3827  //
3828  //  More extensive analysis could be done to handle more cases here.
3829  //
3830  if (ExitBr->getSuccessor(0) != L->getHeader() &&
3831      ExitBr->getSuccessor(1) != L->getHeader() &&
3832      ExitBr->getParent() != L->getHeader()) {
3833    // The simple checks failed, try climbing the unique predecessor chain
3834    // up to the header.
3835    bool Ok = false;
3836    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3837      BasicBlock *Pred = BB->getUniquePredecessor();
3838      if (!Pred)
3839        return getCouldNotCompute();
3840      TerminatorInst *PredTerm = Pred->getTerminator();
3841      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3842        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3843        if (PredSucc == BB)
3844          continue;
3845        // If the predecessor has a successor that isn't BB and isn't
3846        // outside the loop, assume the worst.
3847        if (L->contains(PredSucc))
3848          return getCouldNotCompute();
3849      }
3850      if (Pred == L->getHeader()) {
3851        Ok = true;
3852        break;
3853      }
3854      BB = Pred;
3855    }
3856    if (!Ok)
3857      return getCouldNotCompute();
3858  }
3859
3860  // Proceed to the next level to examine the exit condition expression.
3861  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3862                                               ExitBr->getSuccessor(0),
3863                                               ExitBr->getSuccessor(1));
3864}
3865
3866/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3867/// backedge of the specified loop will execute if its exit condition were a
3868/// conditional branch on ExitCond with successors TBB and FBB.
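///
/// For example, for a loop that continues while (A && B) holds, the backedge
/// is taken only while both A and B are true, so when both individual counts
/// are computable the exact count is their unsigned minimum.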
3869ScalarEvolution::BackedgeTakenInfo
3870ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3871                                                       Value *ExitCond,
3872                                                       BasicBlock *TBB,
3873                                                       BasicBlock *FBB) {
3874  // Check if the controlling expression for this loop is an And or Or.
3875  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3876    if (BO->getOpcode() == Instruction::And) {
3877      // Recurse on the operands of the and.
3878      BackedgeTakenInfo BTI0 =
3879        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3880      BackedgeTakenInfo BTI1 =
3881        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3882      const SCEV *BECount = getCouldNotCompute();
3883      const SCEV *MaxBECount = getCouldNotCompute();
3884      if (L->contains(TBB)) {
3885        // Both conditions must be true for the loop to continue executing.
3886        // Choose the less conservative count.
3887        if (BTI0.Exact == getCouldNotCompute() ||
3888            BTI1.Exact == getCouldNotCompute())
3889          BECount = getCouldNotCompute();
3890        else
3891          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3892        if (BTI0.Max == getCouldNotCompute())
3893          MaxBECount = BTI1.Max;
3894        else if (BTI1.Max == getCouldNotCompute())
3895          MaxBECount = BTI0.Max;
3896        else
3897          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3898      } else {
3899        // Both conditions must be true at the same time for the loop to exit.
3900        // For now, be conservative.
3901        assert(L->contains(FBB) && "Loop block has no successor in loop!");
3902        if (BTI0.Max == BTI1.Max)
3903          MaxBECount = BTI0.Max;
3904        if (BTI0.Exact == BTI1.Exact)
3905          BECount = BTI0.Exact;
3906      }
3907
3908      return BackedgeTakenInfo(BECount, MaxBECount);
3909    }
3910    if (BO->getOpcode() == Instruction::Or) {
3911      // Recurse on the operands of the or.
3912      BackedgeTakenInfo BTI0 =
3913        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3914      BackedgeTakenInfo BTI1 =
3915        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3916      const SCEV *BECount = getCouldNotCompute();
3917      const SCEV *MaxBECount = getCouldNotCompute();
3918      if (L->contains(FBB)) {
3919        // Both conditions must be false for the loop to continue executing.
3920        // Choose the less conservative count.
3921        if (BTI0.Exact == getCouldNotCompute() ||
3922            BTI1.Exact == getCouldNotCompute())
3923          BECount = getCouldNotCompute();
3924        else
3925          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3926        if (BTI0.Max == getCouldNotCompute())
3927          MaxBECount = BTI1.Max;
3928        else if (BTI1.Max == getCouldNotCompute())
3929          MaxBECount = BTI0.Max;
3930        else
3931          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3932      } else {
3933        // Both conditions must be false at the same time for the loop to exit.
3934        // For now, be conservative.
3935        assert(L->contains(TBB) && "Loop block has no successor in loop!");
3936        if (BTI0.Max == BTI1.Max)
3937          MaxBECount = BTI0.Max;
3938        if (BTI0.Exact == BTI1.Exact)
3939          BECount = BTI0.Exact;
3940      }
3941
3942      return BackedgeTakenInfo(BECount, MaxBECount);
3943    }
3944  }
3945
3946  // With an icmp, it may be feasible to compute an exact backedge-taken count.
3947  // Proceed to the next level to examine the icmp.
3948  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3949    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3950
3951  // Check for a constant condition. These are normally stripped out by
3952  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
3953  // preserve the CFG and is temporarily leaving constant conditions
3954  // in place.
3955  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
3956    if (L->contains(FBB) == !CI->getZExtValue())
3957      // The backedge is always taken.
3958      return getCouldNotCompute();
3959    else
3960      // The backedge is never taken.
3961      return getConstant(CI->getType(), 0);
3962  }
3963
3964  // If it's not an integer or pointer comparison then compute it the hard way.
3965  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3966}
3967
3968/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3969/// backedge of the specified loop will execute if its exit condition were a
3970/// conditional branch on the ICmpInst ExitCond with successors TBB and FBB.
3971ScalarEvolution::BackedgeTakenInfo
3972ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3973                                                           ICmpInst *ExitCond,
3974                                                           BasicBlock *TBB,
3975                                                           BasicBlock *FBB) {
3976
3977  // If the condition was exit on true, convert the condition to exit on false
3978  ICmpInst::Predicate Cond;
3979  if (!L->contains(FBB))
3980    Cond = ExitCond->getPredicate();
3981  else
3982    Cond = ExitCond->getInversePredicate();
3983
3984  // Handle common loops like: for (X = "string"; *X; ++X)
3985  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3986    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3987      BackedgeTakenInfo ItCnt =
3988        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3989      if (ItCnt.hasAnyInfo())
3990        return ItCnt;
3991    }
3992
3993  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3994  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3995
3996  // Try to evaluate any dependencies out of the loop.
3997  LHS = getSCEVAtScope(LHS, L);
3998  RHS = getSCEVAtScope(RHS, L);
3999
4000  // At this point, we would like to compute how many iterations of the
4001  // loop the predicate will return true for these inputs.
4002  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
4003    // If there is a loop-invariant, force it into the RHS.
4004    std::swap(LHS, RHS);
4005    Cond = ICmpInst::getSwappedPredicate(Cond);
4006  }
4007
4008  // Simplify the operands before analyzing them.
4009  (void)SimplifyICmpOperands(Cond, LHS, RHS);
4010
4011  // If we have a comparison of a chrec against a constant, try to use value
4012  // ranges to answer this query.
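  // For example (illustrative), for a continue-condition of the form
  // {0,+,1} u< 100, makeConstantRange(ICMP_ULT, 100) yields the range
  // [0, 100), and getNumIterationsInRange asks for how many iterations the
  // recurrence produces values inside that range.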
4013  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
4014    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
4015      if (AddRec->getLoop() == L) {
4016        // Form the constant range.
4017        ConstantRange CompRange(
4018            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
4019
4020        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
4021        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
4022      }
4023
4024  switch (Cond) {
4025  case ICmpInst::ICMP_NE: {                     // while (X != Y)
4026    // Convert to: while (X-Y != 0)
4027    BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
4028    if (BTI.hasAnyInfo()) return BTI;
4029    break;
4030  }
4031  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
4032    // Convert to: while (X-Y == 0)
4033    BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
4034    if (BTI.hasAnyInfo()) return BTI;
4035    break;
4036  }
4037  case ICmpInst::ICMP_SLT: {
4038    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
4039    if (BTI.hasAnyInfo()) return BTI;
4040    break;
4041  }
4042  case ICmpInst::ICMP_SGT: {
4043    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
4044                                             getNotSCEV(RHS), L, true);
4045    if (BTI.hasAnyInfo()) return BTI;
4046    break;
4047  }
4048  case ICmpInst::ICMP_ULT: {
4049    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
4050    if (BTI.hasAnyInfo()) return BTI;
4051    break;
4052  }
4053  case ICmpInst::ICMP_UGT: {
4054    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
4055                                             getNotSCEV(RHS), L, false);
4056    if (BTI.hasAnyInfo()) return BTI;
4057    break;
4058  }
4059  default:
4060#if 0
4061    dbgs() << "ComputeBackedgeTakenCount ";
4062    if (ExitCond->getOperand(0)->getType()->isUnsigned())
4063      dbgs() << "[unsigned] ";
4064    dbgs() << *LHS << "   "
4065         << Instruction::getOpcodeName(Instruction::ICmp)
4066         << "   " << *RHS << "\n";
4067#endif
4068    break;
4069  }
4070  return
4071    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
4072}
4073
4074static ConstantInt *
4075EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
4076                                ScalarEvolution &SE) {
4077  const SCEV *InVal = SE.getConstant(C);
4078  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
4079  assert(isa<SCEVConstant>(Val) &&
4080         "Evaluation of SCEV at constant didn't fold correctly?");
4081  return cast<SCEVConstant>(Val)->getValue();
4082}
4083
4084/// GetAddressedElementFromGlobal - Given a global variable with an initializer
4085/// and a GEP expression (missing the pointer index) indexing into it, return
4086/// the addressed element of the initializer or null if the index expression is
4087/// invalid.
4088static Constant *
4089GetAddressedElementFromGlobal(GlobalVariable *GV,
4090                              const std::vector<ConstantInt*> &Indices) {
4091  Constant *Init = GV->getInitializer();
4092  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
4093    uint64_t Idx = Indices[i]->getZExtValue();
4094    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
4095      assert(Idx < CS->getNumOperands() && "Bad struct index!");
4096      Init = cast<Constant>(CS->getOperand(Idx));
4097    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
4098      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
4099      Init = cast<Constant>(CA->getOperand(Idx));
4100    } else if (isa<ConstantAggregateZero>(Init)) {
4101      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
4102        assert(Idx < STy->getNumElements() && "Bad struct index!");
4103        Init = Constant::getNullValue(STy->getElementType(Idx));
4104      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
4105        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
4106        Init = Constant::getNullValue(ATy->getElementType());
4107      } else {
4108        llvm_unreachable("Unknown constant aggregate type!");
4109      }
4111    } else {
4112      return 0; // Unknown initializer type
4113    }
4114  }
4115  return Init;
4116}
4117
4118/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
4119/// 'icmp op load X, cst', try to see if we can compute the backedge
4120/// execution count.
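///
/// An illustrative case (hypothetical source):
///   static const int A[] = { 1, 2, 3, 0 };
///   for (i = 0; A[i] != 0; ++i) ...
/// Here the load is of (gep @A, 0, {0,+,1}), and the count can be found by
/// substituting successive constant index values and folding the compare.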
4121ScalarEvolution::BackedgeTakenInfo
4122ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
4123                                                LoadInst *LI,
4124                                                Constant *RHS,
4125                                                const Loop *L,
4126                                                ICmpInst::Predicate predicate) {
4127  if (LI->isVolatile()) return getCouldNotCompute();
4128
4129  // Check to see if the loaded pointer is a getelementptr of a global.
4130  // TODO: Use SCEV instead of manually grubbing with GEPs.
4131  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
4132  if (!GEP) return getCouldNotCompute();
4133
4134  // Make sure that it is really a constant global we are gepping, with an
4135  // initializer, and make sure the first IDX is really 0.
4136  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
4137  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
4138      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
4139      !cast<Constant>(GEP->getOperand(1))->isNullValue())
4140    return getCouldNotCompute();
4141
4142  // Okay, we allow one non-constant index into the GEP instruction.
4143  Value *VarIdx = 0;
4144  std::vector<ConstantInt*> Indexes;
4145  unsigned VarIdxNum = 0;
4146  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
4147    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
4148      Indexes.push_back(CI);
4149    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
4150      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
4151      VarIdx = GEP->getOperand(i);
4152      VarIdxNum = i-2;
4153      Indexes.push_back(0);
4154    }
4155
4156  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
4157  // Check to see if X is a loop variant variable value now.
4158  const SCEV *Idx = getSCEV(VarIdx);
4159  Idx = getSCEVAtScope(Idx, L);
4160
4161  // We can only recognize very limited forms of loop index expressions, in
4162  // particular, only affine AddRec's like {C1,+,C2}.
4163  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
4164  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
4165      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
4166      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
4167    return getCouldNotCompute();
4168
4169  unsigned MaxSteps = MaxBruteForceIterations;
4170  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
4171    ConstantInt *ItCst = ConstantInt::get(
4172                           cast<IntegerType>(IdxExpr->getType()), IterationNum);
4173    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
4174
4175    // Form the GEP offset.
4176    Indexes[VarIdxNum] = Val;
4177
4178    Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
4179    if (Result == 0) break;  // Cannot compute!
4180
4181    // Evaluate the condition for this iteration.
4182    Result = ConstantExpr::getICmp(predicate, Result, RHS);
4183    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
4184    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
4185#if 0
4186      dbgs() << "\n***\n*** Computed loop count " << *ItCst
4187             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
4188             << "***\n";
4189#endif
4190      ++NumArrayLenItCounts;
4191      return getConstant(ItCst);   // Found terminating iteration!
4192    }
4193  }
4194  return getCouldNotCompute();
4195}
4196
4197
4198/// CanConstantFold - Return true if we can constant fold an instruction of the
4199/// specified type, assuming that all operands were constants.
4200static bool CanConstantFold(const Instruction *I) {
4201  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
4202      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
4203    return true;
4204
4205  if (const CallInst *CI = dyn_cast<CallInst>(I))
4206    if (const Function *F = CI->getCalledFunction())
4207      return canConstantFoldCallTo(F);
4208  return false;
4209}
4210
4211/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
4212/// in the loop that V is derived from.  We allow arbitrary operations along the
4213/// way, but the operands of an operation must either be constants or a value
4214/// derived from a constant PHI.  If this expression does not fit with these
4215/// constraints, return null.
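///
/// For example (hypothetical IR):
///   %i      = phi i32 [ 0, %entry ], [ %i.next, %loop ]
///   %i.next = add i32 %i, 1
///   %cmp    = icmp ne i32 %i.next, 100
/// Here %cmp is derived from the single header PHI %i through operations that
/// are constant-foldable, so this routine returns %i for %cmp.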
4216static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
4217  // If this is not an instruction, or if this is an instruction outside of the
4218  // loop, it can't be derived from a loop PHI.
4219  Instruction *I = dyn_cast<Instruction>(V);
4220  if (I == 0 || !L->contains(I)) return 0;
4221
4222  if (PHINode *PN = dyn_cast<PHINode>(I)) {
4223    if (L->getHeader() == I->getParent())
4224      return PN;
4225    else
4226      // We don't currently keep track of the control flow needed to evaluate
4227      // PHIs, so we cannot handle PHIs inside of loops.
4228      return 0;
4229  }
4230
4231  // If we won't be able to constant fold this expression even if the operands
4232  // are constants, return early.
4233  if (!CanConstantFold(I)) return 0;
4234
4235  // Otherwise, we can evaluate this instruction if all of its operands are
4236  // constant or derived from a PHI node themselves.
4237  PHINode *PHI = 0;
4238  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
4239    if (!isa<Constant>(I->getOperand(Op))) {
4240      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
4241      if (P == 0) return 0;  // Not evolving from PHI
4242      if (PHI == 0)
4243        PHI = P;
4244      else if (PHI != P)
4245        return 0;  // Evolving from multiple different PHIs.
4246    }
4247
4248  // This is an expression evolving from a constant PHI!
4249  return PHI;
4250}
4251
4252/// EvaluateExpression - Given an expression that passes the
4253/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
4254/// in the loop has the value PHIVal.  If we can't fold this expression for some
4255/// reason, return null.
4256static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
4257                                    const TargetData *TD) {
4258  if (isa<PHINode>(V)) return PHIVal;
4259  if (Constant *C = dyn_cast<Constant>(V)) return C;
4260  Instruction *I = cast<Instruction>(V);
4261
4262  std::vector<Constant*> Operands(I->getNumOperands());
4263
4264  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4265    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
4266    if (Operands[i] == 0) return 0;
4267  }
4268
4269  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4270    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
4271                                           Operands[1], TD);
4272  return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4273                                  &Operands[0], Operands.size(), TD);
4274}
4275
4276/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
4277/// in the header of its containing loop, we know the loop executes a
4278/// constant number of times, and the PHI node is just a recurrence
4279/// involving constants, fold it.
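///
/// As an illustrative example, for a header PHI
///   %x      = phi i32 [ 3, %entry ], [ %x.next, %loop ]
///   %x.next = mul i32 %x, 2
/// with a known backedge-taken count of 4, the PHI's value in the final
/// iteration is brute-forced to 3 * 2^4 = 48.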
4280Constant *
4281ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
4282                                                   const APInt &BEs,
4283                                                   const Loop *L) {
4284  std::map<PHINode*, Constant*>::iterator I =
4285    ConstantEvolutionLoopExitValue.find(PN);
4286  if (I != ConstantEvolutionLoopExitValue.end())
4287    return I->second;
4288
4289  if (BEs.ugt(MaxBruteForceIterations))
4290    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
4291
4292  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
4293
4294  // Since the loop is canonicalized, the PHI node must have two entries.  One
4295  // entry must be a constant (coming in from outside of the loop), and the
4296  // second must be derived from the same PHI.
4297  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4298  Constant *StartCST =
4299    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4300  if (StartCST == 0)
4301    return RetVal = 0;  // Must be a constant.
4302
4303  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4304  if (getConstantEvolvingPHI(BEValue, L) != PN &&
4305      !isa<Constant>(BEValue))
4306    return RetVal = 0;  // Not derived from same PHI.
4307
4308  // Execute the loop symbolically to determine the exit value.
4309  if (BEs.getActiveBits() >= 32)
4310    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
4311
4312  unsigned NumIterations = BEs.getZExtValue(); // must be in range
4313  unsigned IterationNum = 0;
4314  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
4315    if (IterationNum == NumIterations)
4316      return RetVal = PHIVal;  // Got exit value!
4317
4318    // Compute the value of the PHI node for the next iteration.
4319    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4320    if (NextPHI == PHIVal)
4321      return RetVal = NextPHI;  // Stopped evolving!
4322    if (NextPHI == 0)
4323      return 0;        // Couldn't evaluate!
4324    PHIVal = NextPHI;
4325  }
4326}
4327
4328/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
4329/// constant number of times (the condition evolves only from constants),
4330/// try to evaluate a few iterations of the loop until the exit condition
4331/// gets a value of ExitWhen (true or false).  If we cannot
4332/// evaluate the trip count of the loop, return getCouldNotCompute().
4333const SCEV *
4334ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
4335                                                       Value *Cond,
4336                                                       bool ExitWhen) {
4337  PHINode *PN = getConstantEvolvingPHI(Cond, L);
4338  if (PN == 0) return getCouldNotCompute();
4339
4340  // If the loop is canonicalized, the PHI will have exactly two entries.
4341  // That's the only form we support here.
4342  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
4343
4344  // One entry must be a constant (coming in from outside of the loop), and the
4345  // second must be derived from the same PHI.
4346  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4347  Constant *StartCST =
4348    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4349  if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
4350
4351  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4352  if (getConstantEvolvingPHI(BEValue, L) != PN &&
4353      !isa<Constant>(BEValue))
4354    return getCouldNotCompute();  // Not derived from same PHI.
4355
4356  // Okay, we found a PHI node that defines the trip count of this loop.  Execute
4357  // the loop symbolically to determine when the condition gets a value of
4358  // "ExitWhen".
4359  unsigned IterationNum = 0;
4360  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
4361  for (Constant *PHIVal = StartCST;
4362       IterationNum != MaxIterations; ++IterationNum) {
4363    ConstantInt *CondVal =
4364      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD));
4365
4366    // Couldn't symbolically evaluate.
4367    if (!CondVal) return getCouldNotCompute();
4368
4369    if (CondVal->getValue() == uint64_t(ExitWhen)) {
4370      ++NumBruteForceTripCountsComputed;
4371      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
4372    }
4373
4374    // Compute the value of the PHI node for the next iteration.
4375    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4376    if (NextPHI == 0 || NextPHI == PHIVal)
4377      return getCouldNotCompute();// Couldn't evaluate or not making progress...
4378    PHIVal = NextPHI;
4379  }
4380
4381  // Too many iterations were needed to evaluate.
4382  return getCouldNotCompute();
4383}
4384
4385/// getSCEVAtScope - Return a SCEV expression for the specified value
4386/// at the specified scope in the program.  The L value specifies a loop
4387/// nest to evaluate the expression at; null means the top-level scope, and a
4388/// specified loop means the scope immediately inside of that loop.
4389///
4390/// This method can be used to compute the exit value for a variable defined
4391/// in a loop by querying what the value will hold in the parent loop.
4392///
4393/// In the case that a relevant loop exit value cannot be computed, the
4394/// original value V is returned.
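///
/// For example, if an inner loop's induction variable is {0,+,1}<%inner> and
/// that inner loop's backedge-taken count is computable, querying the value
/// at the scope of the parent loop yields the addrec evaluated at that count.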
4395const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
4396  // Check to see if we've folded this expression at this loop before.
4397  std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
4398  std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
4399    Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
4400  if (!Pair.second)
4401    return Pair.first->second ? Pair.first->second : V;
4402
4403  // Otherwise compute it.
4404  const SCEV *C = computeSCEVAtScope(V, L);
4405  ValuesAtScopes[V][L] = C;
4406  return C;
4407}
4408
4409const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
4410  if (isa<SCEVConstant>(V)) return V;
4411
4412  // If this instruction is evolved from a constant-evolving PHI, compute the
4413  // exit value from the loop without using SCEVs.
4414  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
4415    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
4416      const Loop *LI = (*this->LI)[I->getParent()];
4417      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
4418        if (PHINode *PN = dyn_cast<PHINode>(I))
4419          if (PN->getParent() == LI->getHeader()) {
4420            // Okay, there is no closed form solution for the PHI node.  Check
4421            // to see if the loop that contains it has a known backedge-taken
4422            // count.  If so, we may be able to force computation of the exit
4423            // value.
4424            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
4425            if (const SCEVConstant *BTCC =
4426                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
4427              // Okay, we know how many times the containing loop executes.  If
4428              // this is a constant evolving PHI node, get the final value at
4429              // the specified iteration number.
4430              Constant *RV = getConstantEvolutionLoopExitValue(PN,
4431                                                   BTCC->getValue()->getValue(),
4432                                                               LI);
4433              if (RV) return getSCEV(RV);
4434            }
4435          }
4436
4437      // Okay, this is an expression that we cannot symbolically evaluate
4438      // into a SCEV.  Check to see if it's possible to symbolically evaluate
4439      // the arguments into constants, and if so, try to constant propagate the
4440      // result.  This is particularly useful for computing loop exit values.
4441      if (CanConstantFold(I)) {
4442        SmallVector<Constant *, 4> Operands;
4443        bool MadeImprovement = false;
4444        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4445          Value *Op = I->getOperand(i);
4446          if (Constant *C = dyn_cast<Constant>(Op)) {
4447            Operands.push_back(C);
4448            continue;
4449          }
4450
4451          // If any of the operands is non-constant and if they are
4452          // non-integer and non-pointer, don't even try to analyze them
4453          // with scev techniques.
4454          if (!isSCEVable(Op->getType()))
4455            return V;
4456
4457          const SCEV *OrigV = getSCEV(Op);
4458          const SCEV *OpV = getSCEVAtScope(OrigV, L);
4459          MadeImprovement |= OrigV != OpV;
4460
4461          Constant *C = 0;
4462          if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
4463            C = SC->getValue();
4464          if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
4465            C = dyn_cast<Constant>(SU->getValue());
4466          if (!C) return V;
4467          if (C->getType() != Op->getType())
4468            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4469                                                              Op->getType(),
4470                                                              false),
4471                                      C, Op->getType());
4472          Operands.push_back(C);
4473        }
4474
4475        // Check to see if getSCEVAtScope actually made an improvement.
4476        if (MadeImprovement) {
4477          Constant *C = 0;
4478          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4479            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
4480                                                Operands[0], Operands[1], TD);
4481          else
4482            C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4483                                         &Operands[0], Operands.size(), TD);
4484          if (!C) return V;
4485          return getSCEV(C);
4486        }
4487      }
4488    }
4489
4490    // This is some other type of SCEVUnknown, just return it.
4491    return V;
4492  }
4493
4494  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
4495    // Avoid performing the look-up in the common case where the specified
4496    // expression has no loop-variant portions.
4497    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
4498      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4499      if (OpAtScope != Comm->getOperand(i)) {
4500        // Okay, at least one of these operands is loop variant but might be
4501        // foldable.  Build a new instance of the folded commutative expression.
4502        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
4503                                            Comm->op_begin()+i);
4504        NewOps.push_back(OpAtScope);
4505
4506        for (++i; i != e; ++i) {
4507          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4508          NewOps.push_back(OpAtScope);
4509        }
4510        if (isa<SCEVAddExpr>(Comm))
4511          return getAddExpr(NewOps);
4512        if (isa<SCEVMulExpr>(Comm))
4513          return getMulExpr(NewOps);
4514        if (isa<SCEVSMaxExpr>(Comm))
4515          return getSMaxExpr(NewOps);
4516        if (isa<SCEVUMaxExpr>(Comm))
4517          return getUMaxExpr(NewOps);
4518        llvm_unreachable("Unknown commutative SCEV type!");
4519      }
4520    }
4521    // If we got here, all operands are loop invariant.
4522    return Comm;
4523  }
4524
4525  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
4526    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
4527    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
4528    if (LHS == Div->getLHS() && RHS == Div->getRHS())
4529      return Div;   // must be loop invariant
4530    return getUDivExpr(LHS, RHS);
4531  }
4532
4533  // If this is a loop recurrence for a loop that does not contain L, then we
4534  // are dealing with the final value computed by the loop.
4535  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4536    // First, attempt to evaluate each operand.
4537    // Avoid performing the look-up in the common case where the specified
4538    // expression has no loop-variant portions.
4539    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
4540      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
4541      if (OpAtScope == AddRec->getOperand(i))
4542        continue;
4543
4544      // Okay, at least one of these operands is loop variant but might be
4545      // foldable.  Build a new instance of the folded AddRec expression.
4546      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
4547                                          AddRec->op_begin()+i);
4548      NewOps.push_back(OpAtScope);
4549      for (++i; i != e; ++i)
4550        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
4551
4552      AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop()));
4553      break;
4554    }
4555
4556    // If the scope is outside the addrec's loop, evaluate it by using the
4557    // loop exit value of the addrec.
4558    if (!AddRec->getLoop()->contains(L)) {
4559      // To evaluate this recurrence, we need to know how many times the AddRec
4560      // loop iterates.  Compute this now.
4561      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
4562      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
4563
4564      // Then, evaluate the AddRec.
4565      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
4566    }
4567
4568    return AddRec;
4569  }
4570
4571  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
4572    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4573    if (Op == Cast->getOperand())
4574      return Cast;  // must be loop invariant
4575    return getZeroExtendExpr(Op, Cast->getType());
4576  }
4577
4578  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
4579    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4580    if (Op == Cast->getOperand())
4581      return Cast;  // must be loop invariant
4582    return getSignExtendExpr(Op, Cast->getType());
4583  }
4584
4585  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
4586    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4587    if (Op == Cast->getOperand())
4588      return Cast;  // must be loop invariant
4589    return getTruncateExpr(Op, Cast->getType());
4590  }
4591
4592  llvm_unreachable("Unknown SCEV type!");
4593  return 0;
4594}
4595
4596/// getSCEVAtScope - This is a convenience function which does
4597/// getSCEVAtScope(getSCEV(V), L).
4598const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
4599  return getSCEVAtScope(getSCEV(V), L);
4600}
4601
4602/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
4603/// following equation:
4604///
4605///     A * X = B (mod N)
4606///
4607/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
4608/// A and B isn't important.
4609///
4610/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
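///
/// A small worked example (illustrative): to solve 4 * X = 12 (mod 2^8),
/// D = gcd(4, 2^8) = 4 and 12 is divisible by 4; (A/D) = 1 has inverse 1
/// modulo (N/D) = 64, so the minimum unsigned root is 1 * (12/4) mod 64 = 3,
/// and indeed 4 * 3 = 12.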
4611static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
4612                                               ScalarEvolution &SE) {
4613  uint32_t BW = A.getBitWidth();
4614  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
4615  assert(A != 0 && "A must be non-zero.");
4616
4617  // 1. D = gcd(A, N)
4618  //
4619  // The gcd of A and N may have only one prime factor: 2. The number of
4620// trailing zeros in A is its multiplicity.
4621  uint32_t Mult2 = A.countTrailingZeros();
4622  // D = 2^Mult2
4623
4624  // 2. Check if B is divisible by D.
4625  //
4626  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
4627  // is not less than multiplicity of this prime factor for D.
4628  if (B.countTrailingZeros() < Mult2)
4629    return SE.getCouldNotCompute();
4630
4631  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
4632  // modulo (N / D).
4633  //
4634  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
4635  // bit width during computations.
4636  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
4637  APInt Mod(BW + 1, 0);
4638  Mod.set(BW - Mult2);  // Mod = N / D
4639  APInt I = AD.multiplicativeInverse(Mod);
4640
4641  // 4. Compute the minimum unsigned root of the equation:
4642  // I * (B / D) mod (N / D)
4643  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
4644
4645  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
4646  // bits.
4647  return SE.getConstant(Result.trunc(BW));
4648}
4649
4650/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
4651/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
4652/// might be the same) or two SCEVCouldNotCompute objects.
4653///
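/// Recall that a quadratic chrec {L,+,M,+,N} evaluated at iteration x is
/// L + M*x + N*(x*(x-1)/2), i.e. (N/2)*x^2 + (M - N/2)*x + L; those are the
/// polynomial coefficients whose roots are computed below.
///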
4654static std::pair<const SCEV *,const SCEV *>
4655SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
4656  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
4657  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
4658  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
4659  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
4660
4661  // We currently can only solve this if the coefficients are constants.
4662  if (!LC || !MC || !NC) {
4663    const SCEV *CNC = SE.getCouldNotCompute();
4664    return std::make_pair(CNC, CNC);
4665  }
4666
4667  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
4668  const APInt &L = LC->getValue()->getValue();
4669  const APInt &M = MC->getValue()->getValue();
4670  const APInt &N = NC->getValue()->getValue();
4671  APInt Two(BitWidth, 2);
4672  APInt Four(BitWidth, 4);
4673
4674  {
4675    using namespace APIntOps;
4676    const APInt& C = L;
4677    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
4678    // The B coefficient is M-N/2
4679    APInt B(M);
4680    B -= sdiv(N,Two);
4681
4682    // The A coefficient is N/2
4683    APInt A(N.sdiv(Two));
4684
4685    // Compute the B^2-4ac term.
4686    APInt SqrtTerm(B);
4687    SqrtTerm *= B;
4688    SqrtTerm -= Four * (A * C);
4689
4690    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4691    // integer value or else APInt::sqrt() will assert.
4692    APInt SqrtVal(SqrtTerm.sqrt());
4693
4694    // Compute the two solutions for the quadratic formula.
4695    // The divisions must be performed as signed divisions.
4696    APInt NegB(-B);
4697    APInt TwoA( A << 1 );
4698    if (TwoA.isMinValue()) {
4699      const SCEV *CNC = SE.getCouldNotCompute();
4700      return std::make_pair(CNC, CNC);
4701    }
4702
4703    LLVMContext &Context = SE.getContext();
4704
4705    ConstantInt *Solution1 =
4706      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
4707    ConstantInt *Solution2 =
4708      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
4709
4710    return std::make_pair(SE.getConstant(Solution1),
4711                          SE.getConstant(Solution2));
4712  } // end APIntOps namespace
4713}
4714
4715/// HowFarToZero - Return the number of times a backedge comparing the specified
4716/// value to zero will execute.  If not computable, return CouldNotCompute.
4717ScalarEvolution::BackedgeTakenInfo
4718ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
4719  // If the value is a constant
4720  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4721    // If the value is already zero, the branch will execute zero times.
4722    if (C->getValue()->isZero()) return C;
4723    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4724  }
4725
4726  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4727  if (!AddRec || AddRec->getLoop() != L)
4728    return getCouldNotCompute();
4729
4730  if (AddRec->isAffine()) {
4731    // If this is an affine expression, the execution count of this branch is
4732    // the minimum unsigned root of the following equation:
4733    //
4734    //     Start + Step*N = 0 (mod 2^BW)
4735    //
4736    // equivalent to:
4737    //
4738    //             Step*N = -Start (mod 2^BW)
4739    //
4740    // where BW is the common bit width of Start and Step.
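    //
    // For example, for the chrec {10,+,-1} the step is -1, so the count is
    // simply Start: the recurrence first reaches zero after 10 backedge
    // traversals.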
4741
4742    // Get the initial value for the loop.
4743    const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4744                                       L->getParentLoop());
4745    const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4746                                      L->getParentLoop());
4747
4748    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4749      // For now we handle only constant steps.
4750
4751      // First, handle unitary steps.
4752      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
4753        return getNegativeSCEV(Start);          //   N = -Start (as unsigned)
4754      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
4755        return Start;                           //    N = Start (as unsigned)
4756
4757      // Then, try to solve the above equation provided that Start is constant.
4758      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4759        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4760                                            -StartC->getValue()->getValue(),
4761                                            *this);
4762    }
4763  } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
4764    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4765    // the quadratic equation to solve it.
4766    std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
4767                                                                    *this);
4768    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4769    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4770    if (R1) {
4771#if 0
4772      dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
4773             << "  sol#2: " << *R2 << "\n";
4774#endif
4775      // Pick the smallest positive root value.
4776      if (ConstantInt *CB =
4777          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
4778                                   R1->getValue(), R2->getValue()))) {
4779        if (!CB->getZExtValue())
4780          std::swap(R1, R2);   // R1 is the minimum root now.
4781
4782        // We can only use this value if the chrec ends up with an exact zero
4783        // value at this index.  When solving for "X*X != 5", for example, we
4784        // should not accept a root of 2.
4785        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4786        if (Val->isZero())
4787          return R1;  // We found a quadratic root!
4788      }
4789    }
4790  }
4791
4792  return getCouldNotCompute();
4793}
4794
4795/// HowFarToNonZero - Return the number of times a backedge checking the
4796/// specified value for nonzero will execute.  If not computable, return
4797/// CouldNotCompute
4798ScalarEvolution::BackedgeTakenInfo
4799ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4800  // Loops that look like: while (X == 0) are very strange indeed.  We don't
4801  // handle them yet except for the trivial case.  This could be expanded in the
4802  // future as needed.
4803
4804  // If the value is a constant, check to see if it is known to be non-zero
4805  // already.  If so, the backedge will execute zero times.
4806  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4807    if (!C->getValue()->isNullValue())
4808      return getConstant(C->getType(), 0);
4809    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4810  }
4811
4812  // We could implement others, but I really doubt anyone writes loops like
4813  // this, and if they did, they would already be constant folded.
4814  return getCouldNotCompute();
4815}
4816
4817/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4818/// (which may not be an immediate predecessor) which has exactly one
4819/// successor from which BB is reachable, paired with that successor, or a
4820/// pair of null pointers if no such block is found.
4821///
4822std::pair<BasicBlock *, BasicBlock *>
4823ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4824  // If the block has a unique predecessor, then there is no path from the
4825  // predecessor to the block that does not go through the direct edge
4826  // from the predecessor to the block.
4827  if (BasicBlock *Pred = BB->getSinglePredecessor())
4828    return std::make_pair(Pred, BB);
4829
4830  // A loop's header is defined to be a block that dominates the loop.
4831  // If the header has a unique predecessor outside the loop, it must be
4832  // a block that has exactly one successor that can reach the loop.
4833  if (Loop *L = LI->getLoopFor(BB))
4834    return std::make_pair(L->getLoopPredecessor(), L->getHeader());
4835
4836  return std::pair<BasicBlock *, BasicBlock *>();
4837}
4838
4839/// HasSameValue - SCEV structural equivalence is usually sufficient for
4840/// testing whether two expressions are equal, however for the purposes of
4841/// looking for a condition guarding a loop, it can be useful to be a little
4842/// more general, since a front-end may have replicated the controlling
4843/// expression.
4844///
4845static bool HasSameValue(const SCEV *A, const SCEV *B) {
4846  // Quick check to see if they are the same SCEV.
4847  if (A == B) return true;
4848
4849  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4850  // two different instructions with the same value. Check for this case.
4851  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4852    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4853      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4854        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4855          if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
4856            return true;
4857
4858  // Otherwise assume they may have a different value.
4859  return false;
4860}
4861
4862/// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
4863/// predicate Pred. Return true iff any changes were made.
4864///
4865bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
4866                                           const SCEV *&LHS, const SCEV *&RHS) {
4867  bool Changed = false;
4868
4869  // Canonicalize a constant to the right side.
4870  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
4871    // Check for both operands constant.
4872    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
4873      if (ConstantExpr::getICmp(Pred,
4874                                LHSC->getValue(),
4875                                RHSC->getValue())->isNullValue())
4876        goto trivially_false;
4877      else
4878        goto trivially_true;
4879    }
4880    // Otherwise swap the operands to put the constant on the right.
4881    std::swap(LHS, RHS);
4882    Pred = ICmpInst::getSwappedPredicate(Pred);
4883    Changed = true;
4884  }
4885
4886  // If we're comparing an addrec with a value which is loop-invariant in the
4887  // addrec's loop, put the addrec on the left. Also make a dominance check,
4888  // as both operands could be addrecs loop-invariant in each other's loop.
4889  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
4890    const Loop *L = AR->getLoop();
4891    if (LHS->isLoopInvariant(L) && LHS->properlyDominates(L->getHeader(), DT)) {
4892      std::swap(LHS, RHS);
4893      Pred = ICmpInst::getSwappedPredicate(Pred);
4894      Changed = true;
4895    }
4896  }
4897
4898  // If there's a constant operand, canonicalize comparisons with boundary
4899  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
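  // For example, X u>= 1 is canonicalized to X != 0 and X u< 1 to X == 0.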
4900  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
4901    const APInt &RA = RC->getValue()->getValue();
4902    switch (Pred) {
4903    default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4904    case ICmpInst::ICMP_EQ:
4905    case ICmpInst::ICMP_NE:
4906      break;
4907    case ICmpInst::ICMP_UGE:
4908      if ((RA - 1).isMinValue()) {
4909        Pred = ICmpInst::ICMP_NE;
4910        RHS = getConstant(RA - 1);
4911        Changed = true;
4912        break;
4913      }
4914      if (RA.isMaxValue()) {
4915        Pred = ICmpInst::ICMP_EQ;
4916        Changed = true;
4917        break;
4918      }
4919      if (RA.isMinValue()) goto trivially_true;
4920
4921      Pred = ICmpInst::ICMP_UGT;
4922      RHS = getConstant(RA - 1);
4923      Changed = true;
4924      break;
4925    case ICmpInst::ICMP_ULE:
4926      if ((RA + 1).isMaxValue()) {
4927        Pred = ICmpInst::ICMP_NE;
4928        RHS = getConstant(RA + 1);
4929        Changed = true;
4930        break;
4931      }
4932      if (RA.isMinValue()) {
4933        Pred = ICmpInst::ICMP_EQ;
4934        Changed = true;
4935        break;
4936      }
4937      if (RA.isMaxValue()) goto trivially_true;
4938
4939      Pred = ICmpInst::ICMP_ULT;
4940      RHS = getConstant(RA + 1);
4941      Changed = true;
4942      break;
4943    case ICmpInst::ICMP_SGE:
4944      if ((RA - 1).isMinSignedValue()) {
4945        Pred = ICmpInst::ICMP_NE;
4946        RHS = getConstant(RA - 1);
4947        Changed = true;
4948        break;
4949      }
4950      if (RA.isMaxSignedValue()) {
4951        Pred = ICmpInst::ICMP_EQ;
4952        Changed = true;
4953        break;
4954      }
4955      if (RA.isMinSignedValue()) goto trivially_true;
4956
4957      Pred = ICmpInst::ICMP_SGT;
4958      RHS = getConstant(RA - 1);
4959      Changed = true;
4960      break;
4961    case ICmpInst::ICMP_SLE:
4962      if ((RA + 1).isMaxSignedValue()) {
4963        Pred = ICmpInst::ICMP_NE;
4964        RHS = getConstant(RA + 1);
4965        Changed = true;
4966        break;
4967      }
4968      if (RA.isMinSignedValue()) {
4969        Pred = ICmpInst::ICMP_EQ;
4970        Changed = true;
4971        break;
4972      }
4973      if (RA.isMaxSignedValue()) goto trivially_true;
4974
4975      Pred = ICmpInst::ICMP_SLT;
4976      RHS = getConstant(RA + 1);
4977      Changed = true;
4978      break;
4979    case ICmpInst::ICMP_UGT:
4980      if (RA.isMinValue()) {
4981        Pred = ICmpInst::ICMP_NE;
4982        Changed = true;
4983        break;
4984      }
4985      if ((RA + 1).isMaxValue()) {
4986        Pred = ICmpInst::ICMP_EQ;
4987        RHS = getConstant(RA + 1);
4988        Changed = true;
4989        break;
4990      }
4991      if (RA.isMaxValue()) goto trivially_false;
4992      break;
4993    case ICmpInst::ICMP_ULT:
4994      if (RA.isMaxValue()) {
4995        Pred = ICmpInst::ICMP_NE;
4996        Changed = true;
4997        break;
4998      }
4999      if ((RA - 1).isMinValue()) {
5000        Pred = ICmpInst::ICMP_EQ;
5001        RHS = getConstant(RA - 1);
5002        Changed = true;
5003        break;
5004      }
5005      if (RA.isMinValue()) goto trivially_false;
5006      break;
5007    case ICmpInst::ICMP_SGT:
5008      if (RA.isMinSignedValue()) {
5009        Pred = ICmpInst::ICMP_NE;
5010        Changed = true;
5011        break;
5012      }
5013      if ((RA + 1).isMaxSignedValue()) {
5014        Pred = ICmpInst::ICMP_EQ;
5015        RHS = getConstant(RA + 1);
5016        Changed = true;
5017        break;
5018      }
5019      if (RA.isMaxSignedValue()) goto trivially_false;
5020      break;
5021    case ICmpInst::ICMP_SLT:
5022      if (RA.isMaxSignedValue()) {
5023        Pred = ICmpInst::ICMP_NE;
5024        Changed = true;
5025        break;
5026      }
5027      if ((RA - 1).isMinSignedValue()) {
5028        Pred = ICmpInst::ICMP_EQ;
5029        RHS = getConstant(RA - 1);
5030        Changed = true;
5031        break;
5032      }
5033      if (RA.isMinSignedValue()) goto trivially_false;
5034      break;
5035    }
5036  }
5037
5038  // Check for obvious equality.
5039  if (HasSameValue(LHS, RHS)) {
5040    if (ICmpInst::isTrueWhenEqual(Pred))
5041      goto trivially_true;
5042    if (ICmpInst::isFalseWhenEqual(Pred))
5043      goto trivially_false;
5044  }
5045
5046  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
5047  // adding or subtracting 1 from one of the operands.
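  // For example, X s<= Y becomes X s< Y+1 when Y+1 is known not to overflow
  // (judged from the signed range of Y); otherwise X-1 s< Y is tried when
  // X-1 cannot wrap.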
5048  switch (Pred) {
5049  case ICmpInst::ICMP_SLE:
5050    if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
5051      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5052                       /*HasNUW=*/false, /*HasNSW=*/true);
5053      Pred = ICmpInst::ICMP_SLT;
5054      Changed = true;
5055    } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
5056      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5057                       /*HasNUW=*/false, /*HasNSW=*/true);
5058      Pred = ICmpInst::ICMP_SLT;
5059      Changed = true;
5060    }
5061    break;
5062  case ICmpInst::ICMP_SGE:
5063    if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
5064      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5065                       /*HasNUW=*/false, /*HasNSW=*/true);
5066      Pred = ICmpInst::ICMP_SGT;
5067      Changed = true;
5068    } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
5069      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5070                       /*HasNUW=*/false, /*HasNSW=*/true);
5071      Pred = ICmpInst::ICMP_SGT;
5072      Changed = true;
5073    }
5074    break;
5075  case ICmpInst::ICMP_ULE:
5076    if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
5077      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5078                       /*HasNUW=*/true, /*HasNSW=*/false);
5079      Pred = ICmpInst::ICMP_ULT;
5080      Changed = true;
5081    } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
5082      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5083                       /*HasNUW=*/true, /*HasNSW=*/false);
5084      Pred = ICmpInst::ICMP_ULT;
5085      Changed = true;
5086    }
5087    break;
5088  case ICmpInst::ICMP_UGE:
5089    if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
5090      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5091                       /*HasNUW=*/true, /*HasNSW=*/false);
5092      Pred = ICmpInst::ICMP_UGT;
5093      Changed = true;
5094    } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
5095      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5096                       /*HasNUW=*/true, /*HasNSW=*/false);
5097      Pred = ICmpInst::ICMP_UGT;
5098      Changed = true;
5099    }
5100    break;
5101  default:
5102    break;
5103  }
5104
5105  // TODO: More simplifications are possible here.
5106
5107  return Changed;
5108
5109trivially_true:
5110  // Return 0 == 0.
5111  LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
5112  Pred = ICmpInst::ICMP_EQ;
5113  return true;
5114
5115trivially_false:
5116  // Return 0 != 0.
5117  LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
5118  Pred = ICmpInst::ICMP_NE;
5119  return true;
5120}
5121
5122bool ScalarEvolution::isKnownNegative(const SCEV *S) {
5123  return getSignedRange(S).getSignedMax().isNegative();
5124}
5125
5126bool ScalarEvolution::isKnownPositive(const SCEV *S) {
5127  return getSignedRange(S).getSignedMin().isStrictlyPositive();
5128}
5129
5130bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
5131  return !getSignedRange(S).getSignedMin().isNegative();
5132}
5133
5134bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
5135  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
5136}
5137
5138bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
5139  return isKnownNegative(S) || isKnownPositive(S);
5140}
5141
5142bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
5143                                       const SCEV *LHS, const SCEV *RHS) {
5144  // Canonicalize the inputs first.
5145  (void)SimplifyICmpOperands(Pred, LHS, RHS);
5146
5147  // If LHS or RHS is an addrec, check to see if the condition is true in
5148  // every iteration of the loop.
5149  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
5150    if (isLoopEntryGuardedByCond(
5151          AR->getLoop(), Pred, AR->getStart(), RHS) &&
5152        isLoopBackedgeGuardedByCond(
5153          AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
5154      return true;
5155  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
5156    if (isLoopEntryGuardedByCond(
5157          AR->getLoop(), Pred, LHS, AR->getStart()) &&
5158        isLoopBackedgeGuardedByCond(
5159          AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
5160      return true;
5161
5162  // Otherwise see what can be done with known constant ranges.
5163  return isKnownPredicateWithRanges(Pred, LHS, RHS);
5164}
5165
5166bool
5167ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
5168                                            const SCEV *LHS, const SCEV *RHS) {
5169  if (HasSameValue(LHS, RHS))
5170    return ICmpInst::isTrueWhenEqual(Pred);
5171
5172  // This code is split out from isKnownPredicate because it is called from
5173  // within isLoopEntryGuardedByCond.
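  // The cases below try to prove the predicate purely from the known constant
  // ranges of the operands.  For example (illustrative), if getSignedRange(LHS)
  // is [0,10) and getSignedRange(RHS) is [10,20), then "LHS <s RHS" must hold.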
5174  switch (Pred) {
5175  default:
5176    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5177    break;
5178  case ICmpInst::ICMP_SGT:
5179    Pred = ICmpInst::ICMP_SLT;
5180    std::swap(LHS, RHS);
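    // FALL THROUGH.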
5181  case ICmpInst::ICMP_SLT: {
5182    ConstantRange LHSRange = getSignedRange(LHS);
5183    ConstantRange RHSRange = getSignedRange(RHS);
5184    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
5185      return true;
5186    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
5187      return false;
5188    break;
5189  }
5190  case ICmpInst::ICMP_SGE:
5191    Pred = ICmpInst::ICMP_SLE;
5192    std::swap(LHS, RHS);
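    // FALL THROUGH.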
5193  case ICmpInst::ICMP_SLE: {
5194    ConstantRange LHSRange = getSignedRange(LHS);
5195    ConstantRange RHSRange = getSignedRange(RHS);
5196    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
5197      return true;
5198    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
5199      return false;
5200    break;
5201  }
5202  case ICmpInst::ICMP_UGT:
5203    Pred = ICmpInst::ICMP_ULT;
5204    std::swap(LHS, RHS);
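    // FALL THROUGH.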
5205  case ICmpInst::ICMP_ULT: {
5206    ConstantRange LHSRange = getUnsignedRange(LHS);
5207    ConstantRange RHSRange = getUnsignedRange(RHS);
5208    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
5209      return true;
5210    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
5211      return false;
5212    break;
5213  }
5214  case ICmpInst::ICMP_UGE:
5215    Pred = ICmpInst::ICMP_ULE;
5216    std::swap(LHS, RHS);
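    // FALL THROUGH.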
5217  case ICmpInst::ICMP_ULE: {
5218    ConstantRange LHSRange = getUnsignedRange(LHS);
5219    ConstantRange RHSRange = getUnsignedRange(RHS);
5220    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
5221      return true;
5222    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
5223      return false;
5224    break;
5225  }
5226  case ICmpInst::ICMP_NE: {
5227    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
5228      return true;
5229    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
5230      return true;
5231
5232    const SCEV *Diff = getMinusSCEV(LHS, RHS);
5233    if (isKnownNonZero(Diff))
5234      return true;
5235    break;
5236  }
5237  case ICmpInst::ICMP_EQ:
5238    // The check at the top of the function catches the case where
5239    // the values are known to be equal.
5240    break;
5241  }
5242  return false;
5243}
5244
5245/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
5246/// protected by a conditional between LHS and RHS.  This is used to
5247/// eliminate casts.
5248bool
5249ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
5250                                             ICmpInst::Predicate Pred,
5251                                             const SCEV *LHS, const SCEV *RHS) {
5252  // Interpret a null as meaning no loop, where there is obviously no guard
5253  // (interprocedural conditions notwithstanding).
5254  if (!L) return true;
5255
5256  BasicBlock *Latch = L->getLoopLatch();
5257  if (!Latch)
5258    return false;
5259
5260  BranchInst *LoopContinuePredicate =
5261    dyn_cast<BranchInst>(Latch->getTerminator());
5262  if (!LoopContinuePredicate ||
5263      LoopContinuePredicate->isUnconditional())
5264    return false;
5265
5266  return isImpliedCond(Pred, LHS, RHS,
5267                       LoopContinuePredicate->getCondition(),
5268                       LoopContinuePredicate->getSuccessor(0) != L->getHeader());
5269}
5270
5271/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
5272/// by a conditional between LHS and RHS.  This is used to help avoid max
5273/// expressions in loop trip counts, and to eliminate casts.
5274bool
5275ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
5276                                          ICmpInst::Predicate Pred,
5277                                          const SCEV *LHS, const SCEV *RHS) {
5278  // Interpret a null as meaning no loop, where there is obviously no guard
5279  // (interprocedural conditions notwithstanding).
5280  if (!L) return false;
5281
5282  // Starting at the loop predecessor, climb up the predecessor chain, as long
5283  // as we can find predecessors that have a unique successor leading to the
5284  // original header.
5285  for (std::pair<BasicBlock *, BasicBlock *>
5286         Pair(L->getLoopPredecessor(), L->getHeader());
5287       Pair.first;
5288       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
5289
5290    BranchInst *LoopEntryPredicate =
5291      dyn_cast<BranchInst>(Pair.first->getTerminator());
5292    if (!LoopEntryPredicate ||
5293        LoopEntryPredicate->isUnconditional())
5294      continue;
5295
5296    if (isImpliedCond(Pred, LHS, RHS,
5297                      LoopEntryPredicate->getCondition(),
5298                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
5299      return true;
5300  }
5301
5302  return false;
5303}
5304
5305/// isImpliedCond - Test whether the condition described by Pred, LHS,
5306/// and RHS is true whenever the given Cond value evaluates to true.
5307bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
5308                                    const SCEV *LHS, const SCEV *RHS,
5309                                    Value *FoundCondValue,
5310                                    bool Inverse) {
5311  // Recursively handle And and Or conditions.
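  // If the branch condition is an And that is known to be true, both of its
  // operands are true, so it is enough for either operand to imply the desired
  // predicate.  Dually, an Or that is known to be false (Inverse) means both
  // operands are false, so either inverted operand may supply the implication.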
5312  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
5313    if (BO->getOpcode() == Instruction::And) {
5314      if (!Inverse)
5315        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
5316               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
5317    } else if (BO->getOpcode() == Instruction::Or) {
5318      if (Inverse)
5319        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
5320               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
5321    }
5322  }
5323
5324  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
5325  if (!ICI) return false;
5326
5327  // Bail if the ICmp's operands' types are wider than the needed type
5328  // before attempting to call getSCEV on them. This avoids infinite
5329  // recursion, since the analysis of widening casts can require loop
5330  // exit condition information for overflow checking, which would
5331  // lead back here.
5332  if (getTypeSizeInBits(LHS->getType()) <
5333      getTypeSizeInBits(ICI->getOperand(0)->getType()))
5334    return false;
5335
5336  // Now that we've found a conditional branch that guards the loop entry or
5337  // backedge, check to see if it is the comparison we are looking for.
5338  ICmpInst::Predicate FoundPred;
5339  if (Inverse)
5340    FoundPred = ICI->getInversePredicate();
5341  else
5342    FoundPred = ICI->getPredicate();
5343
5344  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
5345  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
5346
5347  // Balance the types. The case where FoundLHS' type is wider than
5348  // LHS' type is checked for above.
5349  if (getTypeSizeInBits(LHS->getType()) >
5350      getTypeSizeInBits(FoundLHS->getType())) {
5351    if (CmpInst::isSigned(Pred)) {
5352      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
5353      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
5354    } else {
5355      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
5356      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
5357    }
5358  }
5359
5360  // Canonicalize the query to match the way instcombine will have
5361  // canonicalized the comparison.
5362  if (SimplifyICmpOperands(Pred, LHS, RHS))
5363    if (LHS == RHS)
5364      return CmpInst::isTrueWhenEqual(Pred);
5365  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
5366    if (FoundLHS == FoundRHS)
5367      return CmpInst::isFalseWhenEqual(FoundPred);
5368
5369  // Check to see if we can make the LHS or RHS match.
5370  if (LHS == FoundRHS || RHS == FoundLHS) {
5371    if (isa<SCEVConstant>(RHS)) {
5372      std::swap(FoundLHS, FoundRHS);
5373      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
5374    } else {
5375      std::swap(LHS, RHS);
5376      Pred = ICmpInst::getSwappedPredicate(Pred);
5377    }
5378  }
5379
5380  // Check whether the found predicate is the same as the desired predicate.
5381  if (FoundPred == Pred)
5382    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
5383
5384  // Check whether swapping the found predicate makes it the same as the
5385  // desired predicate.
5386  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
5387    if (isa<SCEVConstant>(RHS))
5388      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
5389    else
5390      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
5391                                   RHS, LHS, FoundLHS, FoundRHS);
5392  }
5393
5394  // Check whether the found condition is strictly stronger than the desired one.
5395  if (FoundPred == ICmpInst::ICMP_EQ)
5396    if (ICmpInst::isTrueWhenEqual(Pred))
5397      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
5398        return true;
5399  if (Pred == ICmpInst::ICMP_NE)
5400    if (!ICmpInst::isTrueWhenEqual(FoundPred))
5401      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
5402        return true;
5403
5404  // Otherwise assume the worst.
5405  return false;
5406}
5407
5408/// isImpliedCondOperands - Test whether the condition described by Pred,
5409/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
5410/// and FoundRHS is true.
5411bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
5412                                            const SCEV *LHS, const SCEV *RHS,
5413                                            const SCEV *FoundLHS,
5414                                            const SCEV *FoundRHS) {
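  // In addition to using the found condition directly, try it with both
  // operands complemented and swapped: getNotSCEV(X) is -1-X, and
  // "FoundLHS < FoundRHS" holds exactly when "~FoundRHS < ~FoundLHS" does,
  // for both signed and unsigned orderings.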
5415  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
5416                                     FoundLHS, FoundRHS) ||
5417         // ~x < ~y --> x > y
5418         isImpliedCondOperandsHelper(Pred, LHS, RHS,
5419                                     getNotSCEV(FoundRHS),
5420                                     getNotSCEV(FoundLHS));
5421}
5422
5423/// isImpliedCondOperandsHelper - Test whether the condition described by
5424/// Pred, LHS, and RHS is true whenever the condition described by Pred,
5425/// FoundLHS, and FoundRHS is true.
5426bool
5427ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
5428                                             const SCEV *LHS, const SCEV *RHS,
5429                                             const SCEV *FoundLHS,
5430                                             const SCEV *FoundRHS) {
5431  switch (Pred) {
5432  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5433  case ICmpInst::ICMP_EQ:
5434  case ICmpInst::ICMP_NE:
5435    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
5436      return true;
5437    break;
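  // For the ordered cases below, the implication follows by monotonicity.
  // Illustratively, for SLT/SLE: if LHS <=s FoundLHS and RHS >=s FoundRHS,
  // then FoundLHS <s FoundRHS gives LHS <=s FoundLHS <s FoundRHS <=s RHS.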
5438  case ICmpInst::ICMP_SLT:
5439  case ICmpInst::ICMP_SLE:
5440    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
5441        isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
5442      return true;
5443    break;
5444  case ICmpInst::ICMP_SGT:
5445  case ICmpInst::ICMP_SGE:
5446    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
5447        isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
5448      return true;
5449    break;
5450  case ICmpInst::ICMP_ULT:
5451  case ICmpInst::ICMP_ULE:
5452    if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
5453        isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
5454      return true;
5455    break;
5456  case ICmpInst::ICMP_UGT:
5457  case ICmpInst::ICMP_UGE:
5458    if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
5459        isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
5460      return true;
5461    break;
5462  }
5463
5464  return false;
5465}
5466
5467/// getBECount - Subtract the end and start values and divide by the step,
5468/// rounding up, to get the number of times the backedge is executed. Return
5469/// CouldNotCompute if an intermediate computation overflows.
5470const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
5471                                        const SCEV *End,
5472                                        const SCEV *Step,
5473                                        bool NoWrap) {
5474  assert(!isKnownNegative(Step) &&
5475         "This code doesn't handle negative strides yet!");
5476
5477  const Type *Ty = Start->getType();
5478  const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
5479  const SCEV *Diff = getMinusSCEV(End, Start);
5480  const SCEV *RoundUp = getAddExpr(Step, NegOne);
5481
5482  // Add an adjustment to the difference between End and Start so that
5483  // the division will effectively round up.
5484  const SCEV *Add = getAddExpr(Diff, RoundUp);
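  // Illustrative arithmetic: with Start=0, End=10 and Step=3, Diff is 10,
  // RoundUp is 2 and Add is 12, so the result below is 12 /u 3 = 4, i.e.
  // ceil(10/3).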
5485
5486  if (!NoWrap) {
5487    // Check Add for unsigned overflow.
5488    // TODO: More sophisticated things could be done here.
5489    const Type *WideTy = IntegerType::get(getContext(),
5490                                          getTypeSizeInBits(Ty) + 1);
5491    const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
5492    const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
5493    const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
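    // If the narrow addition can wrap, zero-extending its result need not equal
    // performing the addition in the wider type, so the comparison below
    // conservatively detects possible overflow.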
5494    if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
5495      return getCouldNotCompute();
5496  }
5497
5498  return getUDivExpr(Add, Step);
5499}
5500
5501/// HowManyLessThans - Return the number of times a backedge containing the
5502/// specified less-than comparison will execute.  If not computable, return
5503/// CouldNotCompute.
5504ScalarEvolution::BackedgeTakenInfo
5505ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
5506                                  const Loop *L, bool isSigned) {
5507  // Only handle:  "ADDREC < LoopInvariant".
5508  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
5509
5510  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
5511  if (!AddRec || AddRec->getLoop() != L)
5512    return getCouldNotCompute();
5513
5514  // Check to see if we have a flag which makes analysis easy.
5515  bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() :
5516                           AddRec->hasNoUnsignedWrap();
5517
5518  if (AddRec->isAffine()) {
5519    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
5520    const SCEV *Step = AddRec->getStepRecurrence(*this);
5521
5522    if (Step->isZero())
5523      return getCouldNotCompute();
5524    if (Step->isOne()) {
5525      // With unit stride, the iteration never steps past the limit value.
5526    } else if (isKnownPositive(Step)) {
5527      // Test whether a positive iteration can step past the limit
5528      // value and past the maximum value for its type in a single step.
5529      // Note that it's not sufficient to check NoWrap here, because even
5530      // though the value after a wrap is undefined, wrapping is not undefined
5531      // behavior.  If wrap does occur, the loop could either terminate or
5532      // loop infinitely; in either case, the loop is guaranteed to iterate
5533      // at least until the iteration in which the wrapping occurs.
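      // Illustrative check (unsigned, i8, constant Step of 10): Max minus the
      // largest value of Step-1 is 255 - 9 = 246; if RHS could be as large as
      // 250, an iteration value of 249 could step to 259 and wrap, so we
      // conservatively give up.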
5534      const SCEV *One = getConstant(Step->getType(), 1);
5535      if (isSigned) {
5536        APInt Max = APInt::getSignedMaxValue(BitWidth);
5537        if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
5538              .slt(getSignedRange(RHS).getSignedMax()))
5539          return getCouldNotCompute();
5540      } else {
5541        APInt Max = APInt::getMaxValue(BitWidth);
5542        if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
5543              .ult(getUnsignedRange(RHS).getUnsignedMax()))
5544          return getCouldNotCompute();
5545      }
5546    } else
5547      // TODO: Handle negative strides here and below.
5548      return getCouldNotCompute();
5549
5550    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
5551    // m.  So, we count the number of iterations in which {n,+,s} < m is true.
5552    // Note that we cannot simply return max(m-n,0)/s because it's not safe to
5553    // treat m-n as either signed or unsigned due to possible overflow.
5554
5555    // First, we get the value of the LHS in the first iteration: n
5556    const SCEV *Start = AddRec->getOperand(0);
5557
5558    // Determine the minimum constant start value.
5559    const SCEV *MinStart = getConstant(isSigned ?
5560      getSignedRange(Start).getSignedMin() :
5561      getUnsignedRange(Start).getUnsignedMin());
5562
5563    // If we know that the condition is true in order to enter the loop,
5564    // then we know that it will run exactly (m-n)/s times. Otherwise, we
5565    // only know that it will execute (max(m,n)-n)/s times. In both cases,
5566    // the division must round up.
5567    const SCEV *End = RHS;
5568    if (!isLoopEntryGuardedByCond(L,
5569                                  isSigned ? ICmpInst::ICMP_SLT :
5570                                             ICmpInst::ICMP_ULT,
5571                                  getMinusSCEV(Start, Step), RHS))
5572      End = isSigned ? getSMaxExpr(RHS, Start)
5573                     : getUMaxExpr(RHS, Start);
5574
5575    // Determine the maximum constant end value.
5576    const SCEV *MaxEnd = getConstant(isSigned ?
5577      getSignedRange(End).getSignedMax() :
5578      getUnsignedRange(End).getUnsignedMax());
5579
5580    // If MaxEnd is within a step of the maximum integer value in its type,
5581    // adjust it down to the minimum value which would produce the same effect.
5582    // This allows the subsequent ceiling division of (N+(step-1))/step to
5583    // compute the correct value.
5584    const SCEV *StepMinusOne = getMinusSCEV(Step,
5585                                            getConstant(Step->getType(), 1));
5586    MaxEnd = isSigned ?
5587      getSMinExpr(MaxEnd,
5588                  getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
5589                               StepMinusOne)) :
5590      getUMinExpr(MaxEnd,
5591                  getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
5592                               StepMinusOne));
5593
5594    // Finally, we subtract these two values and divide, rounding up, to get
5595    // the number of times the backedge is executed.
5596    const SCEV *BECount = getBECount(Start, End, Step, NoWrap);
5597
5598    // The maximum backedge count is similar, except using the minimum start
5599    // value and the maximum end value.
5600    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap);
5601
5602    return BackedgeTakenInfo(BECount, MaxBECount);
5603  }
5604
5605  return getCouldNotCompute();
5606}
5607
5608/// getNumIterationsInRange - Return the number of iterations of this loop that
5609/// produce values in the specified constant range.  Another way of looking at
5610/// this is that it returns the first iteration number where the value is not in
5611/// the range, thus computing the exit count. If the iteration count can't
5612/// be computed, an instance of SCEVCouldNotCompute is returned.
5613const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
5614                                                    ScalarEvolution &SE) const {
5615  if (Range.isFullSet())  // Infinite loop.
5616    return SE.getCouldNotCompute();
5617
5618  // If the start is a non-zero constant, shift the range to simplify things.
5619  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
5620    if (!SC->getValue()->isZero()) {
5621      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
5622      Operands[0] = SE.getConstant(SC->getType(), 0);
5623      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
5624      if (const SCEVAddRecExpr *ShiftedAddRec =
5625            dyn_cast<SCEVAddRecExpr>(Shifted))
5626        return ShiftedAddRec->getNumIterationsInRange(
5627                           Range.subtract(SC->getValue()->getValue()), SE);
5628      // This is strange and shouldn't happen.
5629      return SE.getCouldNotCompute();
5630    }
5631
5632  // The only time we can solve this is when we have all constant indices.
5633  // Otherwise, we cannot determine the overflow conditions.
5634  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
5635    if (!isa<SCEVConstant>(getOperand(i)))
5636      return SE.getCouldNotCompute();
5637
5638
5639  // Okay, at this point we know that all elements of the chrec are constants
5640  // and that the start element is zero.
5641
5642  // First check to see if the range contains zero.  If not, the first
5643  // iteration exits.
5644  unsigned BitWidth = SE.getTypeSizeInBits(getType());
5645  if (!Range.contains(APInt(BitWidth, 0)))
5646    return SE.getConstant(getType(), 0);
5647
5648  if (isAffine()) {
5649    // If this is an affine expression then we have this situation:
5650    //   Solve {0,+,A} in Range  ===  Ax in Range
5651
5652    // We know that zero is in the range.  If A is positive then we know that
5653    // the upper value of the range must be the first possible exit value.
5654    // If A is negative then the lower of the range is the last possible loop
5655    // value.  Also note that we already checked for a full range.
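    // Illustrative example: for {0,+,2} and Range = [0,5), A is 2 and End is
    // 4, so ExitVal is (4+2)/2 = 3; iterations 0-2 produce 0, 2 and 4 (all in
    // the range) and iteration 3 produces 6, the first value outside it.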
5656    APInt One(BitWidth,1);
5657    APInt A     = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
5658    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
5659
5660    // The exit value should be (End+A)/A.
5661    APInt ExitVal = (End + A).udiv(A);
5662    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
5663
5664    // Evaluate at the exit value.  If we really did fall out of the valid
5665    // range, then we computed our trip count, otherwise wrap around or other
5666    // things must have happened.
5667    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
5668    if (Range.contains(Val->getValue()))
5669      return SE.getCouldNotCompute();  // Something strange happened
5670
5671    // Ensure that the previous value is in the range.  This is a sanity check.
5672    assert(Range.contains(
5673           EvaluateConstantChrecAtConstant(this,
5674           ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
5675           "Linear scev computation is off in a bad way!");
5676    return SE.getConstant(ExitValue);
5677  } else if (isQuadratic()) {
5678    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
5679    // quadratic equation to solve it.  To do this, we must frame our problem in
5680    // terms of figuring out when zero is crossed, instead of when
5681    // Range.getUpper() is crossed.
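    // Subtracting Range.getUpper() from the constant term shifts the chrec so
    // that "value reaches Range.getUpper()" becomes "shifted value reaches
    // zero", which is the form SolveQuadraticEquation handles.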
5682    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
5683    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
5684    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
5685
5686    // Next, solve the constructed addrec
5687    std::pair<const SCEV *,const SCEV *> Roots =
5688      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
5689    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
5690    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
5691    if (R1) {
5692      // Pick the smallest positive root value.
5693      if (ConstantInt *CB =
5694          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
5695                         R1->getValue(), R2->getValue()))) {
5696        if (!CB->getZExtValue())
5697          std::swap(R1, R2);   // R1 is the minimum root now.
5698
5699        // Make sure the root is not off by one.  The returned iteration should
5700        // not be in the range, but the previous one should be.  When solving
5701        // for "X*X < 5", for example, we should not return a root of 2.
5702        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
5703                                                             R1->getValue(),
5704                                                             SE);
5705        if (Range.contains(R1Val->getValue())) {
5706          // The next iteration must be out of the range...
5707          ConstantInt *NextVal =
5708                ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
5709
5710          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
5711          if (!Range.contains(R1Val->getValue()))
5712            return SE.getConstant(NextVal);
5713          return SE.getCouldNotCompute();  // Something strange happened
5714        }
5715
5716        // If R1 was not in the range, then it is a good return value.  Make
5717        // sure that R1-1 WAS in the range though, just in case.
5718        ConstantInt *NextVal =
5719               ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
5720        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
5721        if (Range.contains(R1Val->getValue()))
5722          return R1;
5723        return SE.getCouldNotCompute();  // Something strange happened
5724      }
5725    }
5726  }
5727
5728  return SE.getCouldNotCompute();
5729}
5730
5731
5732
5733//===----------------------------------------------------------------------===//
5734//                   SCEVCallbackVH Class Implementation
5735//===----------------------------------------------------------------------===//
5736
5737void ScalarEvolution::SCEVCallbackVH::deleted() {
5738  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
5739  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
5740    SE->ConstantEvolutionLoopExitValue.erase(PN);
5741  SE->Scalars.erase(getValPtr());
5742  // this now dangles!
5743}
5744
5745void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
5746  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
5747
5748  // Forget all the expressions associated with users of the old value,
5749  // so that future queries will recompute the expressions using the new
5750  // value.
5751  Value *Old = getValPtr();
5752  SmallVector<User *, 16> Worklist;
5753  SmallPtrSet<User *, 8> Visited;
5754  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
5755       UI != UE; ++UI)
5756    Worklist.push_back(*UI);
5757  while (!Worklist.empty()) {
5758    User *U = Worklist.pop_back_val();
5759    // Deleting the Old value will cause this to dangle. Postpone
5760    // that until everything else is done.
5761    if (U == Old)
5762      continue;
5763    if (!Visited.insert(U))
5764      continue;
5765    if (PHINode *PN = dyn_cast<PHINode>(U))
5766      SE->ConstantEvolutionLoopExitValue.erase(PN);
5767    SE->Scalars.erase(U);
5768    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
5769         UI != UE; ++UI)
5770      Worklist.push_back(*UI);
5771  }
5772  // Delete the Old value.
5773  if (PHINode *PN = dyn_cast<PHINode>(Old))
5774    SE->ConstantEvolutionLoopExitValue.erase(PN);
5775  SE->Scalars.erase(Old);
5776  // this now dangles!
5777}
5778
5779ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
5780  : CallbackVH(V), SE(se) {}
5781
5782//===----------------------------------------------------------------------===//
5783//                   ScalarEvolution Class Implementation
5784//===----------------------------------------------------------------------===//
5785
5786ScalarEvolution::ScalarEvolution()
5787  : FunctionPass(ID), FirstUnknown(0) {
5788}
5789
5790bool ScalarEvolution::runOnFunction(Function &F) {
5791  this->F = &F;
5792  LI = &getAnalysis<LoopInfo>();
5793  TD = getAnalysisIfAvailable<TargetData>();
5794  DT = &getAnalysis<DominatorTree>();
5795  return false;
5796}
5797
5798void ScalarEvolution::releaseMemory() {
5799  // Iterate through all the SCEVUnknown instances and call their
5800  // destructors, so that they release their references to their values.
5801  for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
5802    U->~SCEVUnknown();
5803  FirstUnknown = 0;
5804
5805  Scalars.clear();
5806  BackedgeTakenCounts.clear();
5807  ConstantEvolutionLoopExitValue.clear();
5808  ValuesAtScopes.clear();
5809  UniqueSCEVs.clear();
5810  SCEVAllocator.Reset();
5811}
5812
5813void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
5814  AU.setPreservesAll();
5815  AU.addRequiredTransitive<LoopInfo>();
5816  AU.addRequiredTransitive<DominatorTree>();
5817}
5818
5819bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
5820  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
5821}
5822
5823static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
5824                          const Loop *L) {
5825  // Print all inner loops first
5826  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
5827    PrintLoopInfo(OS, SE, *I);
5828
5829  OS << "Loop ";
5830  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
5831  OS << ": ";
5832
5833  SmallVector<BasicBlock *, 8> ExitBlocks;
5834  L->getExitBlocks(ExitBlocks);
5835  if (ExitBlocks.size() != 1)
5836    OS << "<multiple exits> ";
5837
5838  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
5839    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
5840  } else {
5841    OS << "Unpredictable backedge-taken count. ";
5842  }
5843
5844  OS << "\n"
5845        "Loop ";
5846  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
5847  OS << ": ";
5848
5849  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
5850    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
5851  } else {
5852    OS << "Unpredictable max backedge-taken count. ";
5853  }
5854
5855  OS << "\n";
5856}
5857
5858void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
5859  // ScalarEvolution's implementation of the print method is to print
5860  // out SCEV values of all instructions that are interesting. Doing
5861  // this potentially causes it to create new SCEV objects, which
5862  // technically conflicts with the const qualifier. This isn't
5863  // observable from outside the class, though, so casting away the
5864  // const isn't dangerous.
5865  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
5866
5867  OS << "Classifying expressions for: ";
5868  WriteAsOperand(OS, F, /*PrintType=*/false);
5869  OS << "\n";
5870  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
5871    if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
5872      OS << *I << '\n';
5873      OS << "  -->  ";
5874      const SCEV *SV = SE.getSCEV(&*I);
5875      SV->print(OS);
5876
5877      const Loop *L = LI->getLoopFor((*I).getParent());
5878
5879      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
5880      if (AtUse != SV) {
5881        OS << "  -->  ";
5882        AtUse->print(OS);
5883      }
5884
5885      if (L) {
5886        OS << "\t\t" "Exits: ";
5887        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
5888        if (!ExitValue->isLoopInvariant(L)) {
5889          OS << "<<Unknown>>";
5890        } else {
5891          OS << *ExitValue;
5892        }
5893      }
5894
5895      OS << "\n";
5896    }
5897
5898  OS << "Determining loop execution counts for: ";
5899  WriteAsOperand(OS, F, /*PrintType=*/false);
5900  OS << "\n";
5901  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
5902    PrintLoopInfo(OS, &SE, *I);
5903}
5904
5905