ScalarEvolution.cpp revision 39125d8ef94cbadccd7339d3344e114dedaab12c
//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeID(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

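// All SCEVs are uniqued through the UniqueSCEVs FoldingSet: each factory
// method below profiles the node it wants (expression kind plus operands),
// looks it up, and only allocates a new object on a miss. This uniquing is
// what makes pointer comparison a valid equality test for SCEVs.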
const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
  new (S) SCEVConstant(ID, V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(
    ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->properlyDominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->properlyDominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

bool
SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::dominates(BB, DT);
}

bool
SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  // This uses a "dominates" query instead of a "properly dominates" query because
  // the instruction which produces the addrec's value is a PHI, and a PHI
  // effectively properly dominates its entire containing block.
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::properlyDominates(BB, DT);
}

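// Print the recurrence in its {start,+,step,...}<loop> form; for example, an
// addrec that starts at 0 and steps by 4 in the loop with header %bb prints
// as {0,+,4}<%bb>.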
void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ">";
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I);
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->properlyDominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

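// isSizeOf - Test whether this SCEVUnknown is the canonical sizeof idiom:
// a ptrtoint of a getelementptr that indexes one element past a null
// pointer, roughly ptrtoint(gep(Ty* null, 1)), whose value is sizeof(Ty).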
bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}

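// isAlignOf - Test whether this SCEVUnknown is the canonical alignof idiom:
// a ptrtoint of a getelementptr to field 1 of a null {i1, Ty} struct,
// roughly ptrtoint(gep({i1, Ty}* null, 0, 1)), whose offset is alignof(Ty).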
bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (const StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isInteger(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

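// isOffsetOf - Test whether this SCEVUnknown is the canonical offsetof
// idiom: a ptrtoint of a getelementptr to a field of a null struct or
// array, roughly ptrtoint(gep(CTy* null, 0, FieldNo)), whose value is
// offsetof(CTy, FieldNo).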
bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (isa<StructType>(Ty) || isa<ArrayType>(Ty)) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

void SCEVUnknown::print(raw_ostream &OS) const {
  const Type *AllocTy;
  if (isSizeOf(AllocTy)) {
    OS << "sizeof(" << *AllocTy << ")";
    return;
  }
  if (isAlignOf(AllocTy)) {
    OS << "alignof(" << *AllocTy << ")";
    return;
  }

  const Type *CTy;
  Constant *FieldNo;
  if (isOffsetOf(CTy, FieldNo)) {
    OS << "offsetof(" << *CTy << ", ";
    WriteAsOperand(OS, FieldNo, false);
    OS << ")";
    return;
  }

  // Otherwise just print it normally.
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

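// CompareTypes - Compare two types for sorting purposes, ordering them by
// structure rather than by pointer address so that the result is
// deterministic from run to run. Returns true if A is ordered before B.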
static bool CompareTypes(const Type *A, const Type *B) {
  if (A->getTypeID() != B->getTypeID())
    return A->getTypeID() < B->getTypeID();
  if (const IntegerType *AI = dyn_cast<IntegerType>(A)) {
    const IntegerType *BI = cast<IntegerType>(B);
    return AI->getBitWidth() < BI->getBitWidth();
  }
  if (const PointerType *AI = dyn_cast<PointerType>(A)) {
    const PointerType *BI = cast<PointerType>(B);
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const ArrayType *AI = dyn_cast<ArrayType>(A)) {
    const ArrayType *BI = cast<ArrayType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const VectorType *AI = dyn_cast<VectorType>(A)) {
    const VectorType *BI = cast<VectorType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const StructType *AI = dyn_cast<StructType>(A)) {
    const StructType *BI = cast<StructType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    for (unsigned i = 0, e = AI->getNumElements(); i != e; ++i)
      if (CompareTypes(AI->getElementType(i), BI->getElementType(i)) ||
          CompareTypes(BI->getElementType(i), AI->getElementType(i)))
        return CompareTypes(AI->getElementType(i), BI->getElementType(i));
  }
  return false;
}

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return false;

      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      llvm_unreachable("Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
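///
/// For example, given the operand list (x, 2, y, 2), the constants sort to
/// the front and, because equal SCEVs are the same uniqued object, the
/// duplicate 2s end up adjacent: (2, 2, x, y) (the relative order of x and y
/// depends on the heuristics above).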
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula uses less than W + K bits. Also, the first formula requires
  // a division step, whereas this formula only requires multiplies and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

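  // As a worked example, take K = 3 with W = 32: K! = 6 = 2^1 * 3, so T = 1
  // and K! / 2^T = 3. The product It*(It-1)*(It-2) is computed at width
  // W + T = 33 bits, divided by 2^T = 2 (the udiv below), truncated back
  // to 32 bits, and multiplied by the multiplicative inverse of 3 mod 2^32.
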
  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                      CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
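///
/// For example, {1,+,2,+,2} evaluates at iteration It = n to
/// 1*BC(n,0) + 2*BC(n,1) + 2*BC(n,2) = 1 + 2*n + n*(n-1) = n*n + n + 1.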
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
  new (S) SCEVTruncateExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) &&
              (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) ||
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
  new (S) SCEVZeroExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(getSignExtendExpr(Start, Ty),
                             getSignExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          const SCEV *UMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          Add = getAddExpr(Start, UMul);
          OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                      getSignedRange(Step).getSignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
  new (S) SCEVSignExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      Ops.push_back(getAnyExtendExpr(*I, Ty));
    return getAddRecExpr(Ops, AR->getLoop());
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
1209static bool
1210CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1211                             SmallVector<const SCEV *, 8> &NewOps,
1212                             APInt &AccumulatedConstant,
1213                             const SmallVectorImpl<const SCEV *> &Ops,
1214                             const APInt &Scale,
1215                             ScalarEvolution &SE) {
1216  bool Interesting = false;
1217
1218  // Iterate over the add operands.
1219  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1220    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1221    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1222      APInt NewScale =
1223        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
1224      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1225        // A multiplication of a constant with another add; recurse.
1226        Interesting |=
1227          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1228                                       cast<SCEVAddExpr>(Mul->getOperand(1))
1229                                         ->getOperands(),
1230                                       NewScale, SE);
1231      } else {
1232        // A multiplication of a constant with some other value. Update
1233        // the map.
1234        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1235        const SCEV *Key = SE.getMulExpr(MulOps);
1236        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1237          M.insert(std::make_pair(Key, NewScale));
1238        if (Pair.second) {
1239          NewOps.push_back(Pair.first->first);
1240        } else {
1241          Pair.first->second += NewScale;
1242          // The map already had an entry for this value, which may indicate
1243          // a folding opportunity.
1244          Interesting = true;
1245        }
1246      }
1247    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1248      // Pull a buried constant out to the outside.
1249      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
1250        Interesting = true;
1251      AccumulatedConstant += Scale * C->getValue()->getValue();
1252    } else {
1253      // An ordinary operand. Update the map.
1254      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1255        M.insert(std::make_pair(Ops[i], Scale));
1256      if (Pair.second) {
1257        NewOps.push_back(Pair.first->first);
1258      } else {
1259        Pair.first->second += Scale;
1260        // The map already had an entry for this value, which may indicate
1261        // a folding opportunity.
1262        Interesting = true;
1263      }
1264    }
1265  }
1266
1267  return Interesting;
1268}
1269
1270namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        bool HasNUW, bool HasNSW) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
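  // (Why this is sound: a sum of non-negative values that does not wrap in
  // the signed sense stays within [0, 2^(n-1)), a subrange of the unsigned
  // range, so it cannot wrap in the unsigned sense either.)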
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (!isKnownNonNegative(Ops[i])) {
        All = false;
        break;
      }
    if (All) HasNUW = true;
  }

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV *Two = getIntegerSCEV(2, Ty);
      const SCEV *Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops, HasNUW, HasNSW);
    }

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expressions until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
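  // For example (illustrative): 2*x + 3*x + y collects to {x -> 5, y -> 1}
  // and regenerates as 5*x + y.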
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
        if (I->first != 0)
          Ops.push_back(getMulExpr(getConstant(I->first),
                                   getAddExpr(I->second)));
      if (Ops.empty())
        return getIntegerSCEV(0, Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
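          // (Mul->getOperand(MulOp == 0) picks the other operand of a
          // two-operand multiply: operand 1 when MulOp is 0, else operand 0.)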
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
            MulOps.erase(MulOps.begin()+MulOp);
            InnerMul = getMulExpr(MulOps);
          }
          const SCEV *One = getIntegerSCEV(1, Ty);
          const SCEV *AddOne = getAddExpr(InnerMul, One);
          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_end());
              MulOps.erase(MulOps.begin()+MulOp);
              InnerMul1 = getMulExpr(MulOps);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_end());
              MulOps.erase(MulOps.begin()+OMulOp);
              InnerMul2 = getMulExpr(MulOps);
            }
            const SCEV *InnerMulSum = getAddExpr(InnerMul1, InnerMul2);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      AddRecOps[0] = getAddExpr(LIOps);

      // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition
      // is not associative so this isn't necessarily safe.
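      // (Illustrative counterexample in i8: 100 + (100 + -100) never wraps,
      // but the regrouped (100 + 100) + -100 overflows signed at the
      // intermediate step, so flags valid for one grouping need not hold
      // for another.)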
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the non-loop-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
                                              AddRec->op_end());
          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
            if (i >= NewOps.size()) {
              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
                            OtherAddRec->op_end());
              break;
            }
            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
          }
          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());

          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getAddExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  SCEVAddExpr *S =
    static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    S = SCEVAllocator.Allocate<SCEVAddExpr>();
    new (S) SCEVAddExpr(ID, Ops);
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}

/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        bool HasNUW, bool HasNSW) {
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVMulExpr operand types don't match!");
#endif

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (!isKnownNonNegative(Ops[i])) {
        All = false;
        break;
      }
    if (All) HasNUW = true;
  }

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                                           LHSC->getValue()->getValue() *
                                           RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2)
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
               I != E; ++I) {
            const SCEV *Mul = getMulExpr(Ops[0], *I);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps);
        }
    }
  }

  // Skip over the add expressions until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  if (Ops.size() == 1)
    return Ops[0];

  // If there are mul operands, inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any other
  // multiplied values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      if (LIOps.size() == 1) {
        const SCEV *Scale = LIOps[0];
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
      } else {
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
          MulOps.push_back(AddRec->getOperand(i));
          NewOps.push_back(getMulExpr(MulOps));
        }
      }

      // It's tempting to propagate the NSW flag here, but nsw multiplication
      // is not associative so this isn't necessarily safe.
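      // (Illustrative counterexample in i8: 64 * (2 * 0) never wraps, but
      // the regrouped (64 * 2) * 0 overflows signed at the intermediate
      // step.)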
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
                                         HasNUW && AddRec->hasNoUnsignedWrap(),
                                         /*HasNSW=*/false);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // multiplied together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
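          // (Derivation, for reference: with F(i+1) = F(i) + B and
          // G(i+1) = G(i) + D, the step of the product is
          // F(i+1)*G(i+1) - F(i)*G(i) = F(i)*D + G(i)*B + B*D.)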
          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
          const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart());
          const SCEV *B = F->getStepRecurrence(*this);
          const SCEV *D = G->getStepRecurrence(*this);
          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
                                           getMulExpr(G, B),
                                           getMulExpr(B, D));
          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
                                                F->getLoop());
          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getMulExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  SCEVMulExpr *S =
    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    S = SCEVAllocator.Allocate<SCEVMulExpr>();
    new (S) SCEVMulExpr(ID, Ops);
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}

/// getUDivExpr - Get a canonical unsigned division expression, or something
/// simpler if possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->equalsInt(1))
      return LHS;                               // X udiv 1 --> X
    if (RHSC->isZero())
      return getIntegerSCEV(0, LHS->getType()); // value is undefined

    // Determine if the division can be folded into the operands of
    // the dividend.
    // TODO: Generalize this to non-constants by using known-bits information.
    const Type *Ty = LHS->getType();
    unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
    unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
    // For non-power-of-two values, effectively round the value up to the
    // nearest power of two.
    if (!RHSC->getValue()->getValue().isPowerOf2())
      ++MaxShiftAmt;
    const IntegerType *ExtTy =
      IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
    // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
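    // For instance (illustrative): {0,+,8}/4 becomes {0,+,2} once the
    // zero-extension comparison below proves the recurrence cannot wrap.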
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
      if (const SCEVConstant *Step =
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
        if (!Step->getValue()->getValue()
              .urem(RHSC->getValue()->getValue()) &&
            getZeroExtendExpr(AR, ExtTy) ==
            getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                          getZeroExtendExpr(Step, ExtTy),
                          AR->getLoop())) {
          SmallVector<const SCEV *, 4> Operands;
          for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
            Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
          return getAddRecExpr(Operands, AR->getLoop());
        }
    // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
      SmallVector<const SCEV *, 4> Operands;
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
        Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
      if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
        // Find an operand that's safely divisible.
        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
          const SCEV *Op = M->getOperand(i);
          const SCEV *Div = getUDivExpr(Op, RHSC);
          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
            const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
            Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
                                                    MOperands.end());
            Operands[i] = Div;
            return getMulExpr(Operands);
          }
        }
    }
    // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
    if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
      SmallVector<const SCEV *, 4> Operands;
      for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
        Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
      if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
        Operands.clear();
        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
          const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
          if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
            break;
          Operands.push_back(Op);
        }
        if (Operands.size() == A->getNumOperands())
          return getAddExpr(Operands);
      }
    }

    // Fold if both operands are constant.
    if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
      Constant *LHSCV = LHSC->getValue();
      Constant *RHSCV = RHSC->getValue();
      return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                 RHSCV)));
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
  new (S) SCEVUDivExpr(ID, LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
                                           const SCEV *Step, const Loop *L,
                                           bool HasNUW, bool HasNSW) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.insert(Operands.end(), StepChrec->op_begin(),
                      StepChrec->op_end());
      return getAddRecExpr(Operands, L);
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, HasNUW, HasNSW);
}

/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L,
                               bool HasNUW, bool HasNSW) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
           getEffectiveSCEVType(Operands[0]->getType()) &&
           "SCEVAddRecExpr operand types don't match!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0}  -->  X
  }

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (unsigned i = 0, e = Operands.size(); i != e; ++i)
      if (!isKnownNonNegative(Operands[i])) {
        All = false;
        break;
      }
    if (All) HasNUW = true;
  }

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
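  // For example (illustrative): {{a,+,b}<L1>,+,c}<L2> with L2 the outer loop
  // becomes {{a,+,c}<L2>,+,b}<L1>, assuming the invariance checks below hold.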
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop->getHeader()) ?
        (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
        (!NestedLoop->contains(L->getHeader()) &&
         DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = true;
      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
        if (!Operands[i]->isLoopInvariant(L)) {
          AllInvariant = false;
          break;
        }
      if (AllInvariant) {
        NestedOperands[0] = getAddRecExpr(Operands, L);
        AllInvariant = true;
        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
          if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
            AllInvariant = false;
            break;
          }
        if (AllInvariant)
          // Ok, both add recurrences are valid after the transformation.
          return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW);
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  ID.AddInteger(Operands.size());
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
  ID.AddPointer(L);
  void *IP = 0;
  SCEVAddRecExpr *S =
    static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
    new (S) SCEVAddRecExpr(ID, Operands, L);
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getSMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                              APIntOps::smax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Find the first SMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, delete one.  Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X smax Y smax Y  -->  X smax Y
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
  new (S) SCEVSMaxExpr(ID, Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getUMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                              APIntOps::umax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have a umax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Find the first UMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, delete one.  Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X umax Y umax Y  -->  X umax Y
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
  new (S) SCEVUMaxExpr(ID, Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~smax(~x, ~y) == smin(x, y).
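  // (This holds because ~z = -1 - z reverses the signed order, so
  // smax(~x, ~y) = ~smin(x, y); applying ~ to both sides gives smin.)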
2241  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2242}
2243
2244const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2245                                         const SCEV *RHS) {
2246  // ~umax(~x, ~y) == umin(x, y)
2247  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2248}
2249
2250const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
2251  Constant *C = ConstantExpr::getSizeOf(AllocTy);
2252  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2253    C = ConstantFoldConstantExpression(CE, TD);
2254  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2255  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2256}
2257
2258const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
2259  Constant *C = ConstantExpr::getAlignOf(AllocTy);
2260  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2261    C = ConstantFoldConstantExpression(CE, TD);
2262  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2263  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2264}
2265
2266const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
2267                                             unsigned FieldNo) {
2268  Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
2269  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2270    C = ConstantFoldConstantExpression(CE, TD);
2271  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
2272  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2273}
2274
2275const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
2276                                             Constant *FieldNo) {
2277  Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
2278  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2279    C = ConstantFoldConstantExpression(CE, TD);
2280  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
2281  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2282}
2283
2284const SCEV *ScalarEvolution::getUnknown(Value *V) {
2285  // Don't attempt to do anything other than create a SCEVUnknown object
2286  // here.  createSCEV only calls getUnknown after checking for all other
2287  // interesting possibilities, and any other code that calls getUnknown
2288  // is doing so in order to hide a value from SCEV canonicalization.
2289
2290  FoldingSetNodeID ID;
2291  ID.AddInteger(scUnknown);
2292  ID.AddPointer(V);
2293  void *IP = 0;
2294  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2295  SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
2296  new (S) SCEVUnknown(ID, V);
2297  UniqueSCEVs.InsertNode(S, IP);
2298  return S;
2299}
2300
2301//===----------------------------------------------------------------------===//
2302//            Basic SCEV Analysis and PHI Idiom Recognition Code
2303//
2304
2305/// isSCEVable - Test if values of the given type are analyzable within
2306/// the SCEV framework. This primarily includes integer types, and it
2307/// can optionally include pointer types if the ScalarEvolution class
2308/// has access to target-specific information.
2309bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2310  // Integers and pointers are always SCEVable.
2311  return Ty->isInteger() || isa<PointerType>(Ty);
2312}
2313
2314/// getTypeSizeInBits - Return the size in bits of the specified type,
2315/// for which isSCEVable must return true.
2316uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2317  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2318
2319  // If we have a TargetData, use it!
2320  if (TD)
2321    return TD->getTypeSizeInBits(Ty);
2322
2323  // Integer types have fixed sizes.
2324  if (Ty->isInteger())
2325    return Ty->getPrimitiveSizeInBits();
2326
2327  // The only other support type is pointer. Without TargetData, conservatively
2328  // assume pointers are 64-bit.
2329  assert(isa<PointerType>(Ty) && "isSCEVable permitted a non-SCEVable type!");
2330  return 64;
2331}
2332
2333/// getEffectiveSCEVType - Return a type with the same bitwidth as
2334/// the given type and which represents how SCEV will treat the given
2335/// type, for which isSCEVable must return true. For pointer types,
2336/// this is the pointer-sized integer type.
2337const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2338  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2339
2340  if (Ty->isInteger())
2341    return Ty;
2342
2343  // The only other support type is pointer.
2344  assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
2345  if (TD) return TD->getIntPtrType(getContext());
2346
2347  // Without TargetData, conservatively assume pointers are 64-bit.
2348  return Type::getInt64Ty(getContext());
2349}
2350
2351const SCEV *ScalarEvolution::getCouldNotCompute() {
2352  return &CouldNotCompute;
2353}
2354
2355/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2356/// expression and create a new one.
2357const SCEV *ScalarEvolution::getSCEV(Value *V) {
2358  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2359
2360  std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
2361  if (I != Scalars.end()) return I->second;
2362  const SCEV *S = createSCEV(V);
2363  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2364  return S;
2365}
2366
2367/// getIntegerSCEV - Given a SCEVable type, create a constant for the
2368/// specified signed integer value and return a SCEV for the constant.
2369const SCEV *ScalarEvolution::getIntegerSCEV(int64_t Val, const Type *Ty) {
2370  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
2371  return getConstant(ConstantInt::get(ITy, Val));
2372}
2373
2374/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2375///
2376const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2377  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2378    return getConstant(
2379               cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2380
2381  const Type *Ty = V->getType();
2382  Ty = getEffectiveSCEVType(Ty);
2383  return getMulExpr(V,
2384                  getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
2385}
2386
2387/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2388const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2389  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2390    return getConstant(
2391                cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2392
2393  const Type *Ty = V->getType();
2394  Ty = getEffectiveSCEVType(Ty);
2395  const SCEV *AllOnes =
2396                   getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
2397  return getMinusSCEV(AllOnes, V);
2398}
2399
2400/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2401///
2402const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2403                                          const SCEV *RHS) {
2404  // X - Y --> X + -Y
2405  return getAddExpr(LHS, getNegativeSCEV(RHS));
2406}
2407
2408/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2409/// input value to the specified type.  If the type must be extended, it is zero
2410/// extended.
2411const SCEV *
2412ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2413                                         const Type *Ty) {
2414  const Type *SrcTy = V->getType();
2415  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
2416         (Ty->isInteger() || isa<PointerType>(Ty)) &&
2417         "Cannot truncate or zero extend with non-integer arguments!");
2418  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2419    return V;  // No conversion
2420  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2421    return getTruncateExpr(V, Ty);
2422  return getZeroExtendExpr(V, Ty);
2423}
2424
2425/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2426/// input value to the specified type.  If the type must be extended, it is sign
2427/// extended.
2428const SCEV *
2429ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2430                                         const Type *Ty) {
2431  const Type *SrcTy = V->getType();
2432  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
2433         (Ty->isInteger() || isa<PointerType>(Ty)) &&
2434         "Cannot truncate or zero extend with non-integer arguments!");
2435  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2436    return V;  // No conversion
2437  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2438    return getTruncateExpr(V, Ty);
2439  return getSignExtendExpr(V, Ty);
2440}
2441
2442/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2443/// input value to the specified type.  If the type must be extended, it is zero
2444/// extended.  The conversion must not be narrowing.
2445const SCEV *
2446ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2447  const Type *SrcTy = V->getType();
2448  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
2449         (Ty->isInteger() || isa<PointerType>(Ty)) &&
2450         "Cannot noop or zero extend with non-integer arguments!");
2451  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2452         "getNoopOrZeroExtend cannot truncate!");
2453  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2454    return V;  // No conversion
2455  return getZeroExtendExpr(V, Ty);
2456}
2457
2458/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2459/// input value to the specified type.  If the type must be extended, it is sign
2460/// extended.  The conversion must not be narrowing.
2461const SCEV *
2462ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2463  const Type *SrcTy = V->getType();
2464  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
2465         (Ty->isInteger() || isa<PointerType>(Ty)) &&
2466         "Cannot noop or sign extend with non-integer arguments!");
2467  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2468         "getNoopOrSignExtend cannot truncate!");
2469  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2470    return V;  // No conversion
2471  return getSignExtendExpr(V, Ty);
2472}
2473
2474/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2475/// the input value to the specified type. If the type must be extended,
2476/// it is extended with unspecified bits. The conversion must not be
2477/// narrowing.
2478const SCEV *
2479ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2480  const Type *SrcTy = V->getType();
2481  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
2482         (Ty->isInteger() || isa<PointerType>(Ty)) &&
2483         "Cannot noop or any extend with non-integer arguments!");
2484  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2485         "getNoopOrAnyExtend cannot truncate!");
2486  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2487    return V;  // No conversion
2488  return getAnyExtendExpr(V, Ty);
2489}
2490
2491/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2492/// input value to the specified type.  The conversion must not be widening.
2493const SCEV *
2494ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2495  const Type *SrcTy = V->getType();
2496  assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
2497         (Ty->isInteger() || isa<PointerType>(Ty)) &&
2498         "Cannot truncate or noop with non-integer arguments!");
2499  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2500         "getTruncateOrNoop cannot extend!");
2501  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2502    return V;  // No conversion
2503  return getTruncateExpr(V, Ty);
2504}
2505
2506/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2507/// the types using zero-extension, and then perform a umax operation
2508/// with them.
2509const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2510                                                        const SCEV *RHS) {
2511  const SCEV *PromotedLHS = LHS;
2512  const SCEV *PromotedRHS = RHS;
2513
2514  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2515    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2516  else
2517    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2518
2519  return getUMaxExpr(PromotedLHS, PromotedRHS);
2520}
2521
2522/// getUMinFromMismatchedTypes - Promote the operands to the wider of
2523/// the types using zero-extension, and then perform a umin operation
2524/// with them.
2525const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2526                                                        const SCEV *RHS) {
2527  const SCEV *PromotedLHS = LHS;
2528  const SCEV *PromotedRHS = RHS;
2529
2530  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2531    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2532  else
2533    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2534
2535  return getUMinExpr(PromotedLHS, PromotedRHS);
2536}
2537
2538/// PushDefUseChildren - Push users of the given Instruction
2539/// onto the given Worklist.
2540static void
2541PushDefUseChildren(Instruction *I,
2542                   SmallVectorImpl<Instruction *> &Worklist) {
2543  // Push the def-use children onto the Worklist stack.
2544  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2545       UI != UE; ++UI)
2546    Worklist.push_back(cast<Instruction>(UI));
2547}
2548
2549/// ForgetSymbolicValue - This looks up computed SCEV values for all
2550/// instructions that depend on the given instruction and removes them from
2551/// the Scalars map if they reference SymName. This is used during PHI
2552/// resolution.
2553void
2554ScalarEvolution::ForgetSymbolicName(Instruction *I, const SCEV *SymName) {
2555  SmallVector<Instruction *, 16> Worklist;
2556  PushDefUseChildren(I, Worklist);
2557
2558  SmallPtrSet<Instruction *, 8> Visited;
2559  Visited.insert(I);
2560  while (!Worklist.empty()) {
2561    Instruction *I = Worklist.pop_back_val();
2562    if (!Visited.insert(I)) continue;
2563
2564    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
2565      Scalars.find(static_cast<Value *>(I));
2566    if (It != Scalars.end()) {
2567      // Short-circuit the def-use traversal if the symbolic name
2568      // ceases to appear in expressions.
2569      if (!It->second->hasOperand(SymName))
2570        continue;
2571
2572      // SCEVUnknown for a PHI either means that it has an unrecognized
2573      // structure, or it's a PHI that's in the process of being computed
2574      // by createNodeForPHI.  In the former case, additional loop trip
2575      // count information isn't going to change anything. In the latter
2576      // case, createNodeForPHI will perform the necessary updates on its
2577      // own when it gets to that point.
2578      if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
2579        ValuesAtScopes.erase(It->second);
2580        Scalars.erase(It);
2581      }
2582    }
2583
2584    PushDefUseChildren(I, Worklist);
2585  }
2586}
2587
2588/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2589/// a loop header, making it a potential recurrence, or it doesn't.
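/// For example (a minimal IR sketch), a header PHI such as
///   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
///   %i.next = add i32 %i, 1
/// is recognized below as the affine recurrence {0,+,1}<%loop>.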
2590///
2591const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2592  if (PN->getNumIncomingValues() == 2)  // The loops have been canonicalized.
2593    if (const Loop *L = LI->getLoopFor(PN->getParent()))
2594      if (L->getHeader() == PN->getParent()) {
2595        // If it lives in the loop header, it has two incoming values, one
2596        // from outside the loop, and one from inside.
2597        unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2598        unsigned BackEdge     = IncomingEdge^1;
2599
2600        // While we are analyzing this PHI node, handle its value symbolically.
2601        const SCEV *SymbolicName = getUnknown(PN);
2602        assert(Scalars.find(PN) == Scalars.end() &&
2603               "PHI node already processed?");
2604        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2605
2606        // Using this symbolic name for the PHI, analyze the value coming around
2607        // the back-edge.
2608        Value *BEValueV = PN->getIncomingValue(BackEdge);
2609        const SCEV *BEValue = getSCEV(BEValueV);
2610
2611        // NOTE: If BEValue is loop invariant, we know that the PHI node just
2612        // has a special value for the first iteration of the loop.
2613
2614        // If the value coming around the backedge is an add with the symbolic
2615        // value we just inserted, then we found a simple induction variable!
2616        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2617          // If there is a single occurrence of the symbolic value, replace it
2618          // with a recurrence.
2619          unsigned FoundIndex = Add->getNumOperands();
2620          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2621            if (Add->getOperand(i) == SymbolicName)
2622              if (FoundIndex == e) {
2623                FoundIndex = i;
2624                break;
2625              }
2626
2627          if (FoundIndex != Add->getNumOperands()) {
2628            // Create an add with everything but the specified operand.
2629            SmallVector<const SCEV *, 8> Ops;
2630            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2631              if (i != FoundIndex)
2632                Ops.push_back(Add->getOperand(i));
2633            const SCEV *Accum = getAddExpr(Ops);
2634
2635            // This is not a valid addrec if the step amount is varying each
2636            // loop iteration, but is not itself an addrec in this loop.
2637            if (Accum->isLoopInvariant(L) ||
2638                (isa<SCEVAddRecExpr>(Accum) &&
2639                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2640              bool HasNUW = false;
2641              bool HasNSW = false;
2642
2643              // If the increment doesn't overflow, then neither the addrec nor
2644              // the post-increment will overflow.
2645              if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
2646                if (OBO->hasNoUnsignedWrap())
2647                  HasNUW = true;
2648                if (OBO->hasNoSignedWrap())
2649                  HasNSW = true;
2650              }
2651
2652              const SCEV *StartVal =
2653                getSCEV(PN->getIncomingValue(IncomingEdge));
2654              const SCEV *PHISCEV =
2655                getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);
2656
2657              // Since the no-wrap flags are on the increment, they apply to the
2658              // post-incremented value as well.
2659              if (Accum->isLoopInvariant(L))
2660                (void)getAddRecExpr(getAddExpr(StartVal, Accum),
2661                                    Accum, L, HasNUW, HasNSW);
2662
2663              // Okay, for the entire analysis of this edge we assumed the PHI
2664              // to be symbolic.  We now need to go back and purge all of the
2665              // entries for the scalars that use the symbolic expression.
2666              ForgetSymbolicName(PN, SymbolicName);
2667              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2668              return PHISCEV;
2669            }
2670          }
2671        } else if (const SCEVAddRecExpr *AddRec =
2672                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
2673          // Otherwise, this could be a loop like this:
2674          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2675          // In this case, j = {1,+,1}  and BEValue is j.
2676          // Because the other in-value of i (0) fits the evolution of BEValue,
2677          // i really is an addrec evolution.
2678          if (AddRec->getLoop() == L && AddRec->isAffine()) {
2679            const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2680
2681            // If StartVal = j.start - j.stride, we can use StartVal as the
2682            // initial value of the addrec evolution.
2683            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2684                                            AddRec->getOperand(1))) {
2685              const SCEV *PHISCEV =
2686                 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2687
2688              // Okay, for the entire analysis of this edge we assumed the PHI
2689              // to be symbolic.  We now need to go back and purge all of the
2690              // entries for the scalars that use the symbolic expression.
2691              ForgetSymbolicName(PN, SymbolicName);
2692              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2693              return PHISCEV;
2694            }
2695          }
2696        }
2697
2698        return SymbolicName;
2699      }
2700
2701  // It's tempting to recognize PHIs with a unique incoming value, however
2702  // this leads passes like indvars to break LCSSA form. Fortunately, such
2703  // PHIs are rare, as instcombine zaps them.
2704
2705  // If it's not a loop phi, we can't handle it yet.
2706  return getUnknown(PN);
2707}
2708
2709/// createNodeForGEP - Expand GEP instructions into add and multiply
2710/// operations. This allows them to be analyzed by regular SCEV code.
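/// For example (assuming i32 elements, hence a 4-byte stride), a GEP like
///   getelementptr inbounds [10 x i32]* %p, i64 0, i64 %i
/// is modeled as %p + 4 * %i, with NSW flags on the arithmetic because of
/// the inbounds keyword.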
2711///
2712const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
2713
2714  bool InBounds = GEP->isInBounds();
2715  const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
2716  Value *Base = GEP->getOperand(0);
2717  // Don't attempt to analyze GEPs over unsized objects.
2718  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2719    return getUnknown(GEP);
2720  const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
2721  gep_type_iterator GTI = gep_type_begin(GEP);
2722  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2723                                      E = GEP->op_end();
2724       I != E; ++I) {
2725    Value *Index = *I;
2726    // Compute the (potentially symbolic) offset in bytes for this index.
2727    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2728      // For a struct, add the member offset.
2729      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2730      TotalOffset = getAddExpr(TotalOffset,
2731                               getOffsetOfExpr(STy, FieldNo),
2732                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
2733    } else {
2734      // For an array, add the element offset, explicitly scaled.
2735      const SCEV *LocalOffset = getSCEV(Index);
2736      // Getelementptr indices are signed.
2737      LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
2738      // Lower "inbounds" GEPs to NSW arithmetic.
2739      LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
2740                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
2741      TotalOffset = getAddExpr(TotalOffset, LocalOffset,
2742                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
2743    }
2744  }
2745  return getAddExpr(getSCEV(Base), TotalOffset,
2746                    /*HasNUW=*/false, /*HasNSW=*/InBounds);
2747}
2748
2749/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2750/// guaranteed to end in (at every loop iteration).  It is, at the same time,
2751/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2752/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
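/// For a multiply the counts add: e.g., GetMinTrailingZeros(4 * %x) is at
/// least 2 plus whatever is known about the trailing zeros of %x.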
2753uint32_t
2754ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2755  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2756    return C->getValue()->getValue().countTrailingZeros();
2757
2758  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2759    return std::min(GetMinTrailingZeros(T->getOperand()),
2760                    (uint32_t)getTypeSizeInBits(T->getType()));
2761
2762  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2763    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2764    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2765             getTypeSizeInBits(E->getType()) : OpRes;
2766  }
2767
2768  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2769    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2770    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2771             getTypeSizeInBits(E->getType()) : OpRes;
2772  }
2773
2774  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2775    // The result is the min of all operands' results.
2776    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2777    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2778      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2779    return MinOpRes;
2780  }
2781
2782  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2783    // The result is the sum of all operands' results.
2784    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2785    uint32_t BitWidth = getTypeSizeInBits(M->getType());
2786    for (unsigned i = 1, e = M->getNumOperands();
2787         SumOpRes != BitWidth && i != e; ++i)
2788      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2789                          BitWidth);
2790    return SumOpRes;
2791  }
2792
2793  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2794    // The result is the min of all operands' results.
2795    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2796    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2797      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2798    return MinOpRes;
2799  }
2800
2801  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2802    // The result is the min of all operands' results.
2803    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2804    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2805      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2806    return MinOpRes;
2807  }
2808
2809  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2810    // The result is the min of all operands' results.
2811    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2812    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2813      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2814    return MinOpRes;
2815  }
2816
2817  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2818    // For a SCEVUnknown, ask ValueTracking.
2819    unsigned BitWidth = getTypeSizeInBits(U->getType());
2820    APInt Mask = APInt::getAllOnesValue(BitWidth);
2821    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2822    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
2823    return Zeros.countTrailingOnes();
2824  }
2825
2826  // SCEVUDivExpr and other unhandled cases: no trailing zeros are known.
2827  return 0;
2828}
2829
2830/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
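/// For example, (zext i8 %x to i32) is known to lie in [0,256), and a
/// {0,+,1}<nuw> addrec over a loop with a known maximum backedge-taken
/// count gets a correspondingly bounded range.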
2831///
2832ConstantRange
2833ScalarEvolution::getUnsignedRange(const SCEV *S) {
2834
2835  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2836    return ConstantRange(C->getValue()->getValue());
2837
2838  unsigned BitWidth = getTypeSizeInBits(S->getType());
2839  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
2840
2841  // If the value has known zeros, the maximum unsigned value will have those
2842  // known zeros as well.
2843  uint32_t TZ = GetMinTrailingZeros(S);
2844  if (TZ != 0)
2845    ConservativeResult =
2846      ConstantRange(APInt::getMinValue(BitWidth),
2847                    APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
2848
2849  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2850    ConstantRange X = getUnsignedRange(Add->getOperand(0));
2851    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2852      X = X.add(getUnsignedRange(Add->getOperand(i)));
2853    return ConservativeResult.intersectWith(X);
2854  }
2855
2856  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2857    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
2858    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2859      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
2860    return ConservativeResult.intersectWith(X);
2861  }
2862
2863  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2864    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
2865    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2866      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
2867    return ConservativeResult.intersectWith(X);
2868  }
2869
2870  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2871    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
2872    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2873      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
2874    return ConservativeResult.intersectWith(X);
2875  }
2876
2877  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2878    ConstantRange X = getUnsignedRange(UDiv->getLHS());
2879    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
2880    return ConservativeResult.intersectWith(X.udiv(Y));
2881  }
2882
2883  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2884    ConstantRange X = getUnsignedRange(ZExt->getOperand());
2885    return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
2886  }
2887
2888  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2889    ConstantRange X = getUnsignedRange(SExt->getOperand());
2890    return ConservativeResult.intersectWith(X.signExtend(BitWidth));
2891  }
2892
2893  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2894    ConstantRange X = getUnsignedRange(Trunc->getOperand());
2895    return ConservativeResult.intersectWith(X.truncate(BitWidth));
2896  }
2897
2898  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2899    // If there's no unsigned wrap, the value will never be less than its
2900    // initial value.
2901    if (AddRec->hasNoUnsignedWrap())
2902      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
2903        ConservativeResult =
2904          ConstantRange(C->getValue()->getValue(),
2905                        APInt(getTypeSizeInBits(C->getType()), 0));
2906
2907    // TODO: non-affine addrec
2908    if (AddRec->isAffine()) {
2909      const Type *Ty = AddRec->getType();
2910      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2911      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
2912          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
2913        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2914
2915        const SCEV *Start = AddRec->getStart();
2916        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2917
2918        // Check for overflow.
2919        if (!AddRec->hasNoUnsignedWrap())
2920          return ConservativeResult;
2921
2922        ConstantRange StartRange = getUnsignedRange(Start);
2923        ConstantRange EndRange = getUnsignedRange(End);
2924        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
2925                                   EndRange.getUnsignedMin());
2926        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
2927                                   EndRange.getUnsignedMax());
2928        if (Min.isMinValue() && Max.isMaxValue())
2929          return ConservativeResult;
2930        return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
2931      }
2932    }
2933
2934    return ConservativeResult;
2935  }
2936
2937  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2938    // For a SCEVUnknown, ask ValueTracking.
2939    unsigned BitWidth = getTypeSizeInBits(U->getType());
2940    APInt Mask = APInt::getAllOnesValue(BitWidth);
2941    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2942    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2943    if (Ones == ~Zeros + 1)
2944      return ConservativeResult;
2945    return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
2946  }
2947
2948  return ConservativeResult;
2949}
2950
2951/// getSignedRange - Determine the signed range for a particular SCEV.
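/// For example, (sext i8 %x to i32) is known to lie in [-128,128), and an
/// addrec with no signed wrap whose operands are all known non-negative is
/// known to remain non-negative.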
2952///
2953ConstantRange
2954ScalarEvolution::getSignedRange(const SCEV *S) {
2955
2956  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2957    return ConstantRange(C->getValue()->getValue());
2958
2959  unsigned BitWidth = getTypeSizeInBits(S->getType());
2960  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
2961
2962  // If the value has known zeros, the maximum signed value will have those
2963  // known zeros as well.
2964  uint32_t TZ = GetMinTrailingZeros(S);
2965  if (TZ != 0)
2966    ConservativeResult =
2967      ConstantRange(APInt::getSignedMinValue(BitWidth),
2968                    APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
2969
2970  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2971    ConstantRange X = getSignedRange(Add->getOperand(0));
2972    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2973      X = X.add(getSignedRange(Add->getOperand(i)));
2974    return ConservativeResult.intersectWith(X);
2975  }
2976
2977  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2978    ConstantRange X = getSignedRange(Mul->getOperand(0));
2979    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2980      X = X.multiply(getSignedRange(Mul->getOperand(i)));
2981    return ConservativeResult.intersectWith(X);
2982  }
2983
2984  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2985    ConstantRange X = getSignedRange(SMax->getOperand(0));
2986    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2987      X = X.smax(getSignedRange(SMax->getOperand(i)));
2988    return ConservativeResult.intersectWith(X);
2989  }
2990
2991  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2992    ConstantRange X = getSignedRange(UMax->getOperand(0));
2993    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2994      X = X.umax(getSignedRange(UMax->getOperand(i)));
2995    return ConservativeResult.intersectWith(X);
2996  }
2997
2998  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2999    ConstantRange X = getSignedRange(UDiv->getLHS());
3000    ConstantRange Y = getSignedRange(UDiv->getRHS());
3001    return ConservativeResult.intersectWith(X.udiv(Y));
3002  }
3003
3004  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3005    ConstantRange X = getSignedRange(ZExt->getOperand());
3006    return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
3007  }
3008
3009  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3010    ConstantRange X = getSignedRange(SExt->getOperand());
3011    return ConservativeResult.intersectWith(X.signExtend(BitWidth));
3012  }
3013
3014  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3015    ConstantRange X = getSignedRange(Trunc->getOperand());
3016    return ConservativeResult.intersectWith(X.truncate(BitWidth));
3017  }
3018
3019  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3020    // If there's no signed wrap, and all the operands have the same sign or
3021    // zero, the value won't ever change sign.
3022    if (AddRec->hasNoSignedWrap()) {
3023      bool AllNonNeg = true;
3024      bool AllNonPos = true;
3025      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
3026        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
3027        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
3028      }
3029      if (AllNonNeg)
3030        ConservativeResult = ConservativeResult.intersectWith(
3031          ConstantRange(APInt(BitWidth, 0),
3032                        APInt::getSignedMinValue(BitWidth)));
3033      else if (AllNonPos)
3034        ConservativeResult = ConservativeResult.intersectWith(
3035          ConstantRange(APInt::getSignedMinValue(BitWidth),
3036                        APInt(BitWidth, 1)));
3037    }
3038
3039    // TODO: non-affine addrec
3040    if (AddRec->isAffine()) {
3041      const Type *Ty = AddRec->getType();
3042      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3043      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3044          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3045        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3046
3047        const SCEV *Start = AddRec->getStart();
3048        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
3049
3050        // Check for overflow.
3051        if (!AddRec->hasNoSignedWrap())
3052          return ConservativeResult;
3053
3054        ConstantRange StartRange = getSignedRange(Start);
3055        ConstantRange EndRange = getSignedRange(End);
3056        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
3057                                   EndRange.getSignedMin());
3058        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
3059                                   EndRange.getSignedMax());
3060        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
3061          return ConservativeResult;
3062        return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
3063      }
3064    }
3065
3066    return ConservativeResult;
3067  }
3068
3069  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3070    // For a SCEVUnknown, ask ValueTracking.
3071    if (!U->getValue()->getType()->isInteger() && !TD)
3072      return ConservativeResult;
3073    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
3074    if (NS == 1)
3075      return ConservativeResult;
3076    return ConservativeResult.intersectWith(
3077      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
3078                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1));
3079  }
3080
3081  return ConservativeResult;
3082}
3083
3084/// createSCEV - We know that there is no SCEV for the specified value.
3085/// Analyze the expression.
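/// For example (with a hypothetical value %x), a shl by 2 becomes a
/// multiply by 4 below, and an 'or' with 1 whose left operand is a
/// multiple of 4 becomes an add, so (%x << 2) | 1 is analyzed as
/// 4 * %x + 1.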
3086///
3087const SCEV *ScalarEvolution::createSCEV(Value *V) {
3088  if (!isSCEVable(V->getType()))
3089    return getUnknown(V);
3090
3091  unsigned Opcode = Instruction::UserOp1;
3092  if (Instruction *I = dyn_cast<Instruction>(V))
3093    Opcode = I->getOpcode();
3094  else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
3095    Opcode = CE->getOpcode();
3096  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
3097    return getConstant(CI);
3098  else if (isa<ConstantPointerNull>(V))
3099    return getIntegerSCEV(0, V->getType());
3100  else if (isa<UndefValue>(V))
3101    return getIntegerSCEV(0, V->getType());
3102  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
3103    return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
3104  else
3105    return getUnknown(V);
3106
3107  Operator *U = cast<Operator>(V);
3108  switch (Opcode) {
3109  case Instruction::Add:
3110    // Don't transfer the NSW and NUW bits from the Add instruction to the
3111    // Add expression, because the Instruction may be guarded by control
3112    // flow and the no-overflow bits may not be valid for the expression in
3113    // any context.
3114    return getAddExpr(getSCEV(U->getOperand(0)),
3115                      getSCEV(U->getOperand(1)));
3116  case Instruction::Mul:
3117    // Don't transfer the NSW and NUW bits from the Mul instruction to the
3118    // Mul expression, as with Add.
3119    return getMulExpr(getSCEV(U->getOperand(0)),
3120                      getSCEV(U->getOperand(1)));
3121  case Instruction::UDiv:
3122    return getUDivExpr(getSCEV(U->getOperand(0)),
3123                       getSCEV(U->getOperand(1)));
3124  case Instruction::Sub:
3125    return getMinusSCEV(getSCEV(U->getOperand(0)),
3126                        getSCEV(U->getOperand(1)));
3127  case Instruction::And:
3128    // For an expression like x&255 that merely masks off the high bits,
3129    // use zext(trunc(x)) as the SCEV expression.
3130    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3131      if (CI->isNullValue())
3132        return getSCEV(U->getOperand(1));
3133      if (CI->isAllOnesValue())
3134        return getSCEV(U->getOperand(0));
3135      const APInt &A = CI->getValue();
3136
3137      // Instcombine's ShrinkDemandedConstant may strip bits out of
3138      // constants, obscuring what would otherwise be a low-bits mask.
3139      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
3140      // knew about to reconstruct a low-bits mask value.
3141      unsigned LZ = A.countLeadingZeros();
3142      unsigned BitWidth = A.getBitWidth();
3143      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
3144      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3145      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
3146
3147      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
3148
3149      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
3150        return
3151          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
3152                                IntegerType::get(getContext(), BitWidth - LZ)),
3153                            U->getType());
3154    }
3155    break;
3156
3157  case Instruction::Or:
3158    // If the RHS of the Or is a constant, we may have something like:
3159    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
3160    // optimizations will transparently handle this case.
3161    //
3162    // In order for this transformation to be safe, the LHS must be of the
3163    // form X*(2^n) and the Or constant must be less than 2^n.
3164    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3165      const SCEV *LHS = getSCEV(U->getOperand(0));
3166      const APInt &CIVal = CI->getValue();
3167      if (GetMinTrailingZeros(LHS) >=
3168          (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
3169        // Build a plain add SCEV.
3170        const SCEV *S = getAddExpr(LHS, getSCEV(CI));
3171        // If the LHS of the add was an addrec and it has no-wrap flags,
3172        // transfer the no-wrap flags, since an or won't introduce a wrap.
3173        if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
3174          const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
3175          if (OldAR->hasNoUnsignedWrap())
3176            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true);
3177          if (OldAR->hasNoSignedWrap())
3178            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true);
3179        }
3180        return S;
3181      }
3182    }
3183    break;
3184  case Instruction::Xor:
3185    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3186      // If the RHS of the xor is a signbit, then this is just an add.
3187      // Instcombine turns add of signbit into xor as a strength reduction step.
3188      if (CI->getValue().isSignBit())
3189        return getAddExpr(getSCEV(U->getOperand(0)),
3190                          getSCEV(U->getOperand(1)));
3191
3192      // If the RHS of xor is -1, then this is a not operation.
3193      if (CI->isAllOnesValue())
3194        return getNotSCEV(getSCEV(U->getOperand(0)));
3195
3196      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
3197      // This is a variant of the check for xor with -1, and it handles
3198      // the case where instcombine has trimmed non-demanded bits out
3199      // of an xor with -1.
3200      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
3201        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
3202          if (BO->getOpcode() == Instruction::And &&
3203              LCI->getValue() == CI->getValue())
3204            if (const SCEVZeroExtendExpr *Z =
3205                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
3206              const Type *UTy = U->getType();
3207              const SCEV *Z0 = Z->getOperand();
3208              const Type *Z0Ty = Z0->getType();
3209              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
3210
3211              // If C is a low-bits mask, the zero extend is serving to
3212              // mask off the high bits. Complement the operand and
3213              // re-apply the zext.
3214              if (APIntOps::isMask(Z0TySize, CI->getValue()))
3215                return getZeroExtendExpr(getNotSCEV(Z0), UTy);
3216
3217              // If C is a single bit, it may be in the sign-bit position
3218              // before the zero-extend. In this case, represent the xor
3219              // using an add, which is equivalent, and re-apply the zext.
3220              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
3221              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3222                  Trunc.isSignBit())
3223                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3224                                         UTy);
3225            }
3226    }
3227    break;
3228
3229  case Instruction::Shl:
3230    // Turn shift left of a constant amount into a multiply.
3231    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3232      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3233      Constant *X = ConstantInt::get(getContext(),
3234        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
3235      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3236    }
3237    break;
3238
3239  case Instruction::LShr:
3240    // Turn logical shift right of a constant into an unsigned divide.
3241    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3242      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3243      Constant *X = ConstantInt::get(getContext(),
3244        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
3245      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3246    }
3247    break;
3248
3249  case Instruction::AShr:
3250    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
3251    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3252      if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
3253        if (L->getOpcode() == Instruction::Shl &&
3254            L->getOperand(1) == U->getOperand(1)) {
3255          unsigned BitWidth = getTypeSizeInBits(U->getType());
3256          uint64_t Amt = BitWidth - CI->getZExtValue();
3257          if (Amt == BitWidth)
3258            return getSCEV(L->getOperand(0));       // shift by zero --> noop
3259          if (Amt > BitWidth)
3260            return getIntegerSCEV(0, U->getType()); // value is undefined
3261          return
3262            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3263                                           IntegerType::get(getContext(), Amt)),
3264                                 U->getType());
3265        }
3266    break;
3267
3268  case Instruction::Trunc:
3269    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
3270
3271  case Instruction::ZExt:
3272    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3273
3274  case Instruction::SExt:
3275    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3276
3277  case Instruction::BitCast:
3278    // BitCasts are no-op casts so we just eliminate the cast.
3279    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
3280      return getSCEV(U->getOperand(0));
3281    break;
3282
3283  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
3284  // lead to pointer expressions which cannot safely be expanded to GEPs,
3285  // because ScalarEvolution doesn't respect the GEP aliasing rules when
3286  // simplifying integer expressions.
3287
3288  case Instruction::GetElementPtr:
3289    return createNodeForGEP(cast<GEPOperator>(U));
3290
3291  case Instruction::PHI:
3292    return createNodeForPHI(cast<PHINode>(U));
3293
3294  case Instruction::Select:
3295    // This could be a smax or umax that was lowered earlier.
3296    // Try to recover it.
3297    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
3298      Value *LHS = ICI->getOperand(0);
3299      Value *RHS = ICI->getOperand(1);
3300      switch (ICI->getPredicate()) {
3301      case ICmpInst::ICMP_SLT:
3302      case ICmpInst::ICMP_SLE:
3303        std::swap(LHS, RHS);
3304        // fall through
3305      case ICmpInst::ICMP_SGT:
3306      case ICmpInst::ICMP_SGE:
3307        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
3308          return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
3309        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
3310          return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
3311        break;
3312      case ICmpInst::ICMP_ULT:
3313      case ICmpInst::ICMP_ULE:
3314        std::swap(LHS, RHS);
3315        // fall through
3316      case ICmpInst::ICMP_UGT:
3317      case ICmpInst::ICMP_UGE:
3318        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
3319          return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
3320        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
3321          return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
3322        break;
3323      case ICmpInst::ICMP_NE:
3324        // n != 0 ? n : 1  ->  umax(n, 1)
3325        if (LHS == U->getOperand(1) &&
3326            isa<ConstantInt>(U->getOperand(2)) &&
3327            cast<ConstantInt>(U->getOperand(2))->isOne() &&
3328            isa<ConstantInt>(RHS) &&
3329            cast<ConstantInt>(RHS)->isZero())
3330          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
3331        break;
3332      case ICmpInst::ICMP_EQ:
3333        // n == 0 ? 1 : n  ->  umax(n, 1)
3334        if (LHS == U->getOperand(2) &&
3335            isa<ConstantInt>(U->getOperand(1)) &&
3336            cast<ConstantInt>(U->getOperand(1))->isOne() &&
3337            isa<ConstantInt>(RHS) &&
3338            cast<ConstantInt>(RHS)->isZero())
3339          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
3340        break;
3341      default:
3342        break;
3343      }
3344    }
3345
3346  default: // We cannot analyze this expression.
3347    break;
3348  }
3349
3350  return getUnknown(V);
3351}
3352
3353
3354
3355//===----------------------------------------------------------------------===//
3356//                   Iteration Count Computation Code
3357//
3358
3359/// getBackedgeTakenCount - If the specified loop has a predictable
3360/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3361/// object. The backedge-taken count is the number of times the loop header
3362/// will be branched to from within the loop. This is one less than the
3363/// trip count of the loop, since it doesn't count the first iteration,
3364/// when the header is branched to from outside the loop.
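/// For example, a loop of the form do { } while (++i != n), entered with
/// i == 0 and n > 0, has a backedge-taken count of n-1 and a trip count
/// of n.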
3365///
3366/// Note that it is not valid to call this method on a loop without a
3367/// loop-invariant backedge-taken count (see
3368/// hasLoopInvariantBackedgeTakenCount).
3369///
3370const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3371  return getBackedgeTakenInfo(L).Exact;
3372}
3373
3374/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3375/// return the least SCEV value that is known never to be less than the
3376/// actual backedge taken count.
3377const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3378  return getBackedgeTakenInfo(L).Max;
3379}
3380
3381/// PushLoopPHIs - Push PHI nodes in the header of the given loop
3382/// onto the given Worklist.
3383static void
3384PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3385  BasicBlock *Header = L->getHeader();
3386
3387  // Push all Loop-header PHIs onto the Worklist stack.
3388  for (BasicBlock::iterator I = Header->begin();
3389       PHINode *PN = dyn_cast<PHINode>(I); ++I)
3390    Worklist.push_back(PN);
3391}
3392
3393const ScalarEvolution::BackedgeTakenInfo &
3394ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3395  // Initially insert a CouldNotCompute for this loop. If the insertion
3396  // succeeds, proceed to actually compute a backedge-taken count and
3397  // update the value. The temporary CouldNotCompute value tells SCEV
3398  // code elsewhere that it shouldn't attempt to request a new
3399  // backedge-taken count, which could result in infinite recursion.
3400  std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
3401    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3402  if (Pair.second) {
3403    BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
3404    if (BECount.Exact != getCouldNotCompute()) {
3405      assert(BECount.Exact->isLoopInvariant(L) &&
3406             BECount.Max->isLoopInvariant(L) &&
3407             "Computed backedge-taken count isn't loop invariant for loop!");
3408      ++NumTripCountsComputed;
3409
3410      // Update the value in the map.
3411      Pair.first->second = BECount;
3412    } else {
3413      if (BECount.Max != getCouldNotCompute())
3414        // Update the value in the map.
3415        Pair.first->second = BECount;
3416      if (isa<PHINode>(L->getHeader()->begin()))
3417        // Only count loops that have phi nodes as not being computable.
3418        ++NumTripCountsNotComputed;
3419    }
3420
3421    // Now that we know more about the trip count for this loop, forget any
3422    // existing SCEV values for PHI nodes in this loop since they are only
3423    // conservative estimates made without the benefit of trip count
3424    // information. This is similar to the code in forgetLoop, except that
3425    // it handles SCEVUnknown PHI nodes specially.
3426    if (BECount.hasAnyInfo()) {
3427      SmallVector<Instruction *, 16> Worklist;
3428      PushLoopPHIs(L, Worklist);
3429
3430      SmallPtrSet<Instruction *, 8> Visited;
3431      while (!Worklist.empty()) {
3432        Instruction *I = Worklist.pop_back_val();
3433        if (!Visited.insert(I)) continue;
3434
3435        std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3436          Scalars.find(static_cast<Value *>(I));
3437        if (It != Scalars.end()) {
3438          // SCEVUnknown for a PHI either means that it has an unrecognized
3439          // structure, or it's a PHI that's in the progress of being computed
3440          // by createNodeForPHI.  In the former case, additional loop trip
3441          // count information isn't going to change anything. In the later
3442          // case, createNodeForPHI will perform the necessary updates on its
3443          // own when it gets to that point.
3444          if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
3445            ValuesAtScopes.erase(It->second);
3446            Scalars.erase(It);
3447          }
3448          if (PHINode *PN = dyn_cast<PHINode>(I))
3449            ConstantEvolutionLoopExitValue.erase(PN);
3450        }
3451
3452        PushDefUseChildren(I, Worklist);
3453      }
3454    }
3455  }
3456  return Pair.first->second;
3457}
3458
3459/// forgetLoop - This method should be called by the client when it has
3460/// changed a loop in a way that may affect ScalarEvolution's ability to
3461/// compute a trip count, or if the loop is deleted.
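/// For example, a pass that unrolls or deletes L should call this so that
/// stale backedge-taken counts and cached PHI values are not reused.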
3462void ScalarEvolution::forgetLoop(const Loop *L) {
3463  // Drop any stored trip count value.
3464  BackedgeTakenCounts.erase(L);
3465
3466  // Drop information about expressions based on loop-header PHIs.
3467  SmallVector<Instruction *, 16> Worklist;
3468  PushLoopPHIs(L, Worklist);
3469
3470  SmallPtrSet<Instruction *, 8> Visited;
3471  while (!Worklist.empty()) {
3472    Instruction *I = Worklist.pop_back_val();
3473    if (!Visited.insert(I)) continue;
3474
3475    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3476      Scalars.find(static_cast<Value *>(I));
3477    if (It != Scalars.end()) {
3478      ValuesAtScopes.erase(It->second);
3479      Scalars.erase(It);
3480      if (PHINode *PN = dyn_cast<PHINode>(I))
3481        ConstantEvolutionLoopExitValue.erase(PN);
3482    }
3483
3484    PushDefUseChildren(I, Worklist);
3485  }
3486}
3487
3488/// ComputeBackedgeTakenCount - Compute the number of times the backedge
3489/// of the specified loop will execute.
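/// When the loop has several exiting blocks, the per-exit counts are
/// combined with umin, since the loop stops at whichever exit is reached
/// first.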
3490ScalarEvolution::BackedgeTakenInfo
3491ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3492  SmallVector<BasicBlock *, 8> ExitingBlocks;
3493  L->getExitingBlocks(ExitingBlocks);
3494
3495  // Examine all exits and pick the most conservative values.
3496  const SCEV *BECount = getCouldNotCompute();
3497  const SCEV *MaxBECount = getCouldNotCompute();
3498  bool CouldNotComputeBECount = false;
3499  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3500    BackedgeTakenInfo NewBTI =
3501      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3502
3503    if (NewBTI.Exact == getCouldNotCompute()) {
3504      // We couldn't compute an exact value for this exit, so
3505      // we won't be able to compute an exact value for the loop.
3506      CouldNotComputeBECount = true;
3507      BECount = getCouldNotCompute();
3508    } else if (!CouldNotComputeBECount) {
3509      if (BECount == getCouldNotCompute())
3510        BECount = NewBTI.Exact;
3511      else
3512        BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3513    }
3514    if (MaxBECount == getCouldNotCompute())
3515      MaxBECount = NewBTI.Max;
3516    else if (NewBTI.Max != getCouldNotCompute())
3517      MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3518  }
3519
3520  return BackedgeTakenInfo(BECount, MaxBECount);
3521}
3522
3523/// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3524/// of the specified loop will execute if it exits via the specified block.
3525ScalarEvolution::BackedgeTakenInfo
3526ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3527                                                   BasicBlock *ExitingBlock) {
3528
3529  // Okay, we've chosen an exiting block.  See what condition causes us to
3530  // exit at this block.
3531  //
3532  // FIXME: we should be able to handle switch instructions (with a single exit)
3533  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3534  if (ExitBr == 0) return getCouldNotCompute();
3535  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3536
3537  // At this point, we know we have a conditional branch that determines whether
3538  // the loop is exited.  However, we don't know if the branch is executed each
3539  // time through the loop.  If not, then the execution count of the branch will
3540  // not be equal to the trip count of the loop.
3541  //
3542  // Currently we check for this by testing whether the exit branch goes to
3543  // the loop header.  If so, we know it will always execute the same number of
3544  // times as the loop.  We also handle the case where the exit block *is* the
3545  // loop header.  This is common for un-rotated loops.
3546  //
3547  // If both of those tests fail, walk up the unique predecessor chain to the
3548  // header, stopping if there is an edge that doesn't exit the loop. If the
3549  // header is reached, the execution count of the branch will be equal to the
3550  // trip count of the loop.
3551  //
3552  //  More extensive analysis could be done to handle more cases here.
3553  //
3554  if (ExitBr->getSuccessor(0) != L->getHeader() &&
3555      ExitBr->getSuccessor(1) != L->getHeader() &&
3556      ExitBr->getParent() != L->getHeader()) {
3557    // The simple checks failed, try climbing the unique predecessor chain
3558    // up to the header.
3559    bool Ok = false;
3560    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3561      BasicBlock *Pred = BB->getUniquePredecessor();
3562      if (!Pred)
3563        return getCouldNotCompute();
3564      TerminatorInst *PredTerm = Pred->getTerminator();
3565      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3566        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3567        if (PredSucc == BB)
3568          continue;
3569        // If the predecessor has a successor that isn't BB and isn't
3570        // outside the loop, assume the worst.
3571        if (L->contains(PredSucc))
3572          return getCouldNotCompute();
3573      }
3574      if (Pred == L->getHeader()) {
3575        Ok = true;
3576        break;
3577      }
3578      BB = Pred;
3579    }
3580    if (!Ok)
3581      return getCouldNotCompute();
3582  }
3583
3584  // Proceed to the next level to examine the exit condition expression.
3585  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3586                                               ExitBr->getSuccessor(0),
3587                                               ExitBr->getSuccessor(1));
3588}
3589
3590/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3591/// backedge of the specified loop will execute if its exit condition
3592/// were a conditional branch of ExitCond, TBB, and FBB.
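/// For example, for a loop like while (a != n && b != m) { }, the exact
/// count, when both subcondition counts are computable, is their umin.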
3593ScalarEvolution::BackedgeTakenInfo
3594ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3595                                                       Value *ExitCond,
3596                                                       BasicBlock *TBB,
3597                                                       BasicBlock *FBB) {
3598  // Check if the controlling expression for this loop is an And or Or.
3599  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3600    if (BO->getOpcode() == Instruction::And) {
3601      // Recurse on the operands of the and.
3602      BackedgeTakenInfo BTI0 =
3603        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3604      BackedgeTakenInfo BTI1 =
3605        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3606      const SCEV *BECount = getCouldNotCompute();
3607      const SCEV *MaxBECount = getCouldNotCompute();
3608      if (L->contains(TBB)) {
3609        // Both conditions must be true for the loop to continue executing.
3610        // Choose the less conservative count.
3611        if (BTI0.Exact == getCouldNotCompute() ||
3612            BTI1.Exact == getCouldNotCompute())
3613          BECount = getCouldNotCompute();
3614        else
3615          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3616        if (BTI0.Max == getCouldNotCompute())
3617          MaxBECount = BTI1.Max;
3618        else if (BTI1.Max == getCouldNotCompute())
3619          MaxBECount = BTI0.Max;
3620        else
3621          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3622      } else {
3623        // Both conditions must be true for the loop to exit.
3624        assert(L->contains(FBB) && "Loop block has no successor in loop!");
3625        if (BTI0.Exact != getCouldNotCompute() &&
3626            BTI1.Exact != getCouldNotCompute())
3627          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3628        if (BTI0.Max != getCouldNotCompute() &&
3629            BTI1.Max != getCouldNotCompute())
3630          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3631      }
3632
3633      return BackedgeTakenInfo(BECount, MaxBECount);
3634    }
3635    if (BO->getOpcode() == Instruction::Or) {
3636      // Recurse on the operands of the or.
3637      BackedgeTakenInfo BTI0 =
3638        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3639      BackedgeTakenInfo BTI1 =
3640        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3641      const SCEV *BECount = getCouldNotCompute();
3642      const SCEV *MaxBECount = getCouldNotCompute();
3643      if (L->contains(FBB)) {
3644        // Both conditions must be false for the loop to continue executing.
3645        // Choose the less conservative count.
3646        if (BTI0.Exact == getCouldNotCompute() ||
3647            BTI1.Exact == getCouldNotCompute())
3648          BECount = getCouldNotCompute();
3649        else
3650          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3651        if (BTI0.Max == getCouldNotCompute())
3652          MaxBECount = BTI1.Max;
3653        else if (BTI1.Max == getCouldNotCompute())
3654          MaxBECount = BTI0.Max;
3655        else
3656          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3657      } else {
3658        // Both conditions must be false for the loop to exit.
3659        assert(L->contains(TBB) && "Loop block has no successor in loop!");
3660        if (BTI0.Exact != getCouldNotCompute() &&
3661            BTI1.Exact != getCouldNotCompute())
3662          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3663        if (BTI0.Max != getCouldNotCompute() &&
3664            BTI1.Max != getCouldNotCompute())
3665          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3666      }
3667
3668      return BackedgeTakenInfo(BECount, MaxBECount);
3669    }
3670  }
3671
3672  // With an icmp, it may be feasible to compute an exact backedge-taken count.
3673  // Proceed to the next level to examine the icmp.
3674  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3675    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3676
3677  // If it's not an integer or pointer comparison then compute it the hard way.
3678  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3679}
3680
3681/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3682/// backedge of the specified loop will execute if its exit condition
3683/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
3684ScalarEvolution::BackedgeTakenInfo
3685ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3686                                                           ICmpInst *ExitCond,
3687                                                           BasicBlock *TBB,
3688                                                           BasicBlock *FBB) {
3689
3690  // If the condition was exit-on-true, convert it to exit-on-false.
3691  ICmpInst::Predicate Cond;
3692  if (!L->contains(FBB))
3693    Cond = ExitCond->getPredicate();
3694  else
3695    Cond = ExitCond->getInversePredicate();
3696
3697  // Handle common loops like: for (X = "string"; *X; ++X)
3698  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3699    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3700      const SCEV *ItCnt =
3701        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3702      if (!isa<SCEVCouldNotCompute>(ItCnt)) {
3703        unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
3704        return BackedgeTakenInfo(ItCnt,
3705                                 isa<SCEVConstant>(ItCnt) ? ItCnt :
3706                                   getConstant(APInt::getMaxValue(BitWidth)-1));
3707      }
3708    }
3709
3710  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3711  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3712
3713  // Try to evaluate any dependencies out of the loop.
3714  LHS = getSCEVAtScope(LHS, L);
3715  RHS = getSCEVAtScope(RHS, L);
3716
3717  // At this point, we would like to compute the number of loop iterations
3718  // for which the predicate returns true for these inputs.
3719  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3720    // If there is a loop-invariant operand, force it onto the RHS.
3721    std::swap(LHS, RHS);
3722    Cond = ICmpInst::getSwappedPredicate(Cond);
3723  }
3724
3725  // If we have a comparison of a chrec against a constant, try to use value
3726  // ranges to answer this query.
3727  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3728    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3729      if (AddRec->getLoop() == L) {
3730        // Form the constant range.
3731        ConstantRange CompRange(
3732            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3733
3734        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3735        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3736      }
3737
3738  switch (Cond) {
3739  case ICmpInst::ICMP_NE: {                     // while (X != Y)
3740    // Convert to: while (X-Y != 0)
3741    const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3742    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3743    break;
3744  }
3745  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
3746    // Convert to: while (X-Y == 0)
3747    const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3748    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
3749    break;
3750  }
3751  case ICmpInst::ICMP_SLT: {
3752    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3753    if (BTI.hasAnyInfo()) return BTI;
3754    break;
3755  }
3756  case ICmpInst::ICMP_SGT: {
3757    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3758                                             getNotSCEV(RHS), L, true);
3759    if (BTI.hasAnyInfo()) return BTI;
3760    break;
3761  }
3762  case ICmpInst::ICMP_ULT: {
3763    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3764    if (BTI.hasAnyInfo()) return BTI;
3765    break;
3766  }
3767  case ICmpInst::ICMP_UGT: {
3768    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3769                                             getNotSCEV(RHS), L, false);
3770    if (BTI.hasAnyInfo()) return BTI;
3771    break;
3772  }
3773  default:
3774#if 0
3775    dbgs() << "ComputeBackedgeTakenCount ";
3776    if (ExitCond->getOperand(0)->getType()->isUnsigned())
3777      dbgs() << "[unsigned] ";
3778    dbgs() << *LHS << "   "
3779         << Instruction::getOpcodeName(Instruction::ICmp)
3780         << "   " << *RHS << "\n";
3781#endif
3782    break;
3783  }
3784  return
3785    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3786}
3787
3788static ConstantInt *
3789EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3790                                ScalarEvolution &SE) {
3791  const SCEV *InVal = SE.getConstant(C);
3792  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
3793  assert(isa<SCEVConstant>(Val) &&
3794         "Evaluation of SCEV at constant didn't fold correctly?");
3795  return cast<SCEVConstant>(Val)->getValue();
3796}
3797
3798/// GetAddressedElementFromGlobal - Given a global variable with an initializer
3799/// and a GEP expression (missing the pointer index) indexing into it, return
3800/// the addressed element of the initializer or null if the index expression is
3801/// invalid.
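/// For example, given a hypothetical @g = constant [2 x i32] [i32 7, i32 9]
/// and the index list {1}, this returns the i32 9 element.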
3802static Constant *
3803GetAddressedElementFromGlobal(GlobalVariable *GV,
3804                              const std::vector<ConstantInt*> &Indices) {
3805  Constant *Init = GV->getInitializer();
3806  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3807    uint64_t Idx = Indices[i]->getZExtValue();
3808    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3809      assert(Idx < CS->getNumOperands() && "Bad struct index!");
3810      Init = cast<Constant>(CS->getOperand(Idx));
3811    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3812      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
3813      Init = cast<Constant>(CA->getOperand(Idx));
3814    } else if (isa<ConstantAggregateZero>(Init)) {
3815      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3816        assert(Idx < STy->getNumElements() && "Bad struct index!");
3817        Init = Constant::getNullValue(STy->getElementType(Idx));
3818      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3819        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
3820        Init = Constant::getNullValue(ATy->getElementType());
3821      } else {
3822        llvm_unreachable("Unknown constant aggregate type!");
3823      }
3825    } else {
3826      return 0; // Unknown initializer type
3827    }
3828  }
3829  return Init;
3830}
3831
3832/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3833/// 'icmp op load X, cst', try to see if we can compute the backedge
3834/// execution count.
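///
/// A sketch of the pattern this recognizes, assuming a constant global:
///   @a = constant [6 x i32] [i32 1, i32 2, i32 3, i32 4, i32 0, i32 9]
///   for (i = 0; a[i] != 0; ++i) {}
/// Each iteration's load folds to a constant, and the compare first becomes
/// false at i == 4, so the backedge-taken count is 4.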
3835const SCEV *
3836ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3837                                                LoadInst *LI,
3838                                                Constant *RHS,
3839                                                const Loop *L,
3840                                                ICmpInst::Predicate predicate) {
3841  if (LI->isVolatile()) return getCouldNotCompute();
3842
3843  // Check to see if the loaded pointer is a getelementptr of a global.
3844  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3845  if (!GEP) return getCouldNotCompute();
3846
3847  // Make sure that it is really a constant global we are gepping, with an
3848  // initializer, and make sure the first IDX is really 0.
3849  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3850  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
3851      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3852      !cast<Constant>(GEP->getOperand(1))->isNullValue())
3853    return getCouldNotCompute();
3854
3855  // Okay, we allow one non-constant index into the GEP instruction.
3856  Value *VarIdx = 0;
3857  std::vector<ConstantInt*> Indexes;
3858  unsigned VarIdxNum = 0;
3859  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3860    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3861      Indexes.push_back(CI);
3862    } else {
3863      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
3864      VarIdx = GEP->getOperand(i);
3865      VarIdxNum = i-2;
3866      Indexes.push_back(0);
3867    }
3868
3869  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3870  // Check to see if X is a loop-variant value now.
3871  const SCEV *Idx = getSCEV(VarIdx);
3872  Idx = getSCEVAtScope(Idx, L);
3873
3874  // We can only recognize very limited forms of loop index expressions, in
3875  // particular, only affine AddRec's like {C1,+,C2}.
3876  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3877  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3878      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3879      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3880    return getCouldNotCompute();
3881
3882  unsigned MaxSteps = MaxBruteForceIterations;
3883  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3884    ConstantInt *ItCst = ConstantInt::get(
3885                           cast<IntegerType>(IdxExpr->getType()), IterationNum);
3886    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3887
3888    // Form the GEP offset.
3889    Indexes[VarIdxNum] = Val;
3890
3891    Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
3892    if (Result == 0) break;  // Cannot compute!
3893
3894    // Evaluate the condition for this iteration.
3895    Result = ConstantExpr::getICmp(predicate, Result, RHS);
3896    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
3897    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3898#if 0
3899      dbgs() << "\n***\n*** Computed loop count " << *ItCst
3900             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3901             << "***\n";
3902#endif
3903      ++NumArrayLenItCounts;
3904      return getConstant(ItCst);   // Found terminating iteration!
3905    }
3906  }
3907  return getCouldNotCompute();
3908}
3909
3910
3911/// CanConstantFold - Return true if we can constant fold an instruction of the
3912/// specified type, assuming that all operands were constants.
3913static bool CanConstantFold(const Instruction *I) {
3914  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3915      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3916    return true;
3917
3918  if (const CallInst *CI = dyn_cast<CallInst>(I))
3919    if (const Function *F = CI->getCalledFunction())
3920      return canConstantFoldCallTo(F);
3921  return false;
3922}
3923
3924/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3925/// in the loop that V is derived from.  We allow arbitrary operations along the
3926/// way, but the operands of an operation must either be constants or a value
3927/// derived from a constant PHI.  If this expression does not fit with these
3928/// constraints, return null.
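///
/// For example (sketch): given
///   %i      = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
///   %i.next = add i32 %i, 1
///   %cmp    = icmp ult i32 %i.next, 100
/// both %i.next and %cmp evolve only from the header PHI %i, so this returns
/// %i for either of them; an expression mixing two different PHIs returns
/// null.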
3929static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3930  // If this is not an instruction, or if this is an instruction outside of the
3931  // loop, it can't be derived from a loop PHI.
3932  Instruction *I = dyn_cast<Instruction>(V);
3933  if (I == 0 || !L->contains(I)) return 0;
3934
3935  if (PHINode *PN = dyn_cast<PHINode>(I)) {
3936    if (L->getHeader() == I->getParent())
3937      return PN;
3938    else
3939      // We don't currently keep track of the control flow needed to evaluate
3940      // PHIs, so we cannot handle PHIs inside of loops.
3941      return 0;
3942  }
3943
3944  // If we won't be able to constant fold this expression even if the operands
3945  // are constants, return early.
3946  if (!CanConstantFold(I)) return 0;
3947
3948  // Otherwise, we can evaluate this instruction if all of its operands are
3949  // constant or derived from a PHI node themselves.
3950  PHINode *PHI = 0;
3951  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
3952    if (!(isa<Constant>(I->getOperand(Op)) ||
3953          isa<GlobalValue>(I->getOperand(Op)))) {
3954      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
3955      if (P == 0) return 0;  // Not evolving from PHI
3956      if (PHI == 0)
3957        PHI = P;
3958      else if (PHI != P)
3959        return 0;  // Evolving from multiple different PHIs.
3960    }
3961
3962  // This is an expression evolving from a constant PHI!
3963  return PHI;
3964}
3965
3966/// EvaluateExpression - Given an expression that passes the
3967/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
3968/// in the loop has the value PHIVal.  If we can't fold this expression for some
3969/// reason, return null.
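///
/// For example (sketch): for the %i.next = add i32 %i, 1 chain above,
/// EvaluateExpression(%i.next, PHIVal = 7) substitutes 7 for the PHI and
/// constant-folds the add, returning the ConstantInt 8.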
3970static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
3971                                    const TargetData *TD) {
3972  if (isa<PHINode>(V)) return PHIVal;
3973  if (Constant *C = dyn_cast<Constant>(V)) return C;
3974  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
3975  Instruction *I = cast<Instruction>(V);
3976
3977  std::vector<Constant*> Operands;
3978  Operands.resize(I->getNumOperands());
3979
3980  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
3981    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
3982    if (Operands[i] == 0) return 0;
3983  }
3984
3985  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
3986    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
3987                                           Operands[1], TD);
3988  return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
3989                                  &Operands[0], Operands.size(), TD);
3990}
3991
3992/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
3993/// in the header of its containing loop, that the loop executes a
3994/// constant number of times, and that the PHI node is just a recurrence
3995/// involving constants, fold it.
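///
/// For example (sketch): for i = phi [ 3, %preheader ], [ i*2, %latch ] with
/// a known backedge-taken count of 4, the symbolic execution below walks
/// 3 -> 6 -> 12 -> 24 -> 48 and returns the ConstantInt 48.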
3996Constant *
3997ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
3998                                                   const APInt &BEs,
3999                                                   const Loop *L) {
4000  std::map<PHINode*, Constant*>::iterator I =
4001    ConstantEvolutionLoopExitValue.find(PN);
4002  if (I != ConstantEvolutionLoopExitValue.end())
4003    return I->second;
4004
4005  if (BEs.ugt(APInt(BEs.getBitWidth(), MaxBruteForceIterations)))
4006    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
4007
4008  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
4009
4010  // Since the loop is canonicalized, the PHI node must have two entries.  One
4011  // entry must be a constant (coming in from outside of the loop), and the
4012  // second must be derived from the same PHI.
4013  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4014  Constant *StartCST =
4015    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4016  if (StartCST == 0)
4017    return RetVal = 0;  // Must be a constant.
4018
4019  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4020  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
4021  if (PN2 != PN)
4022    return RetVal = 0;  // Not derived from same PHI.
4023
4024  // Execute the loop symbolically to determine the exit value.
4025  if (BEs.getActiveBits() >= 32)
4026    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
4027
4028  unsigned NumIterations = BEs.getZExtValue(); // must be in range
4029  unsigned IterationNum = 0;
4030  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
4031    if (IterationNum == NumIterations)
4032      return RetVal = PHIVal;  // Got exit value!
4033
4034    // Compute the value of the PHI node for the next iteration.
4035    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4036    if (NextPHI == PHIVal)
4037      return RetVal = NextPHI;  // Stopped evolving!
4038    if (NextPHI == 0)
4039      return 0;        // Couldn't evaluate!
4040    PHIVal = NextPHI;
4041  }
4042}
4043
4044/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
4045/// constant number of times (the condition evolves only from constants),
4046/// try to evaluate a few iterations of the loop until the exit
4047/// condition gets a value of ExitWhen (true or false).  If we cannot
4048/// evaluate the trip count of the loop, return getCouldNotCompute().
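///
/// For example (sketch): for i = phi [ 0, %preheader ], [ i+3, %latch ] with
/// exit condition icmp eq (i, 12) and ExitWhen = true, the evaluation below
/// first sees a true condition at iteration 4 (i == 12) and returns the
/// constant 4.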
4049const SCEV *
4050ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
4051                                                       Value *Cond,
4052                                                       bool ExitWhen) {
4053  PHINode *PN = getConstantEvolvingPHI(Cond, L);
4054  if (PN == 0) return getCouldNotCompute();
4055
4056  // Since the loop is canonicalized, the PHI node must have two entries.  One
4057  // entry must be a constant (coming in from outside of the loop), and the
4058  // second must be derived from the same PHI.
4059  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4060  Constant *StartCST =
4061    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4062  if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
4063
4064  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4065  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
4066  if (PN2 != PN) return getCouldNotCompute();  // Not derived from same PHI.
4067
4068  // Okay, we found a PHI node that defines the trip count of this loop.  Execute
4069  // the loop symbolically to determine when the condition gets a value of
4070  // "ExitWhen".
4071  unsigned IterationNum = 0;
4072  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
4073  for (Constant *PHIVal = StartCST;
4074       IterationNum != MaxIterations; ++IterationNum) {
4075    ConstantInt *CondVal =
4076      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD));
4077
4078    // Couldn't symbolically evaluate.
4079    if (!CondVal) return getCouldNotCompute();
4080
4081    if (CondVal->getValue() == uint64_t(ExitWhen)) {
4082      ++NumBruteForceTripCountsComputed;
4083      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
4084    }
4085
4086    // Compute the value of the PHI node for the next iteration.
4087    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4088    if (NextPHI == 0 || NextPHI == PHIVal)
4089      return getCouldNotCompute();// Couldn't evaluate or not making progress...
4090    PHIVal = NextPHI;
4091  }
4092
4093  // Too many iterations were needed to evaluate.
4094  return getCouldNotCompute();
4095}
4096
4097/// getSCEVAtScope - Return a SCEV expression for the specified value
4098/// at the specified scope in the program.  The L value specifies a loop
4099/// nest to evaluate the expression at, where null means the top level and a
4100/// specified loop means immediately inside that loop.
4101///
4102/// This method can be used to compute the exit value for a variable defined
4103/// in a loop by querying what the value will hold in the parent loop.
4104///
4105/// In the case that a relevant loop exit value cannot be computed, the
4106/// original value V is returned.
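///
/// For example (sketch): if a hypothetical %sum is {0,+,5}<%loop> and %loop
/// has a backedge-taken count of 9, querying %sum at the scope of %loop's
/// parent evaluates the recurrence at iteration 9 and yields 45, the value
/// %sum holds when %loop exits.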
4107const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
4108  // Check to see if we've folded this expression at this loop before.
4109  std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
4110  std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
4111    Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
4112  if (!Pair.second)
4113    return Pair.first->second ? Pair.first->second : V;
4114
4115  // Otherwise compute it.
4116  const SCEV *C = computeSCEVAtScope(V, L);
4117  ValuesAtScopes[V][L] = C;
4118  return C;
4119}
4120
4121const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
4122  if (isa<SCEVConstant>(V)) return V;
4123
4124  // If this instruction is evolved from a constant-evolving PHI, compute the
4125  // exit value from the loop without using SCEVs.
4126  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
4127    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
4128      const Loop *LI = (*this->LI)[I->getParent()];
4129      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
4130        if (PHINode *PN = dyn_cast<PHINode>(I))
4131          if (PN->getParent() == LI->getHeader()) {
4132            // Okay, there is no closed form solution for the PHI node.  Check
4133            // to see if the loop that contains it has a known backedge-taken
4134            // count.  If so, we may be able to force computation of the exit
4135            // value.
4136            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
4137            if (const SCEVConstant *BTCC =
4138                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
4139              // Okay, we know how many times the containing loop executes.  If
4140              // this is a constant evolving PHI node, get the final value at
4141              // the specified iteration number.
4142              Constant *RV = getConstantEvolutionLoopExitValue(PN,
4143                                                   BTCC->getValue()->getValue(),
4144                                                               LI);
4145              if (RV) return getSCEV(RV);
4146            }
4147          }
4148
4149      // Okay, this is an expression that we cannot symbolically evaluate
4150      // into a SCEV.  Check to see if it's possible to symbolically evaluate
4151      // the arguments into constants, and if so, try to constant propagate the
4152      // result.  This is particularly useful for computing loop exit values.
4153      if (CanConstantFold(I)) {
4154        std::vector<Constant*> Operands;
4155        Operands.reserve(I->getNumOperands());
4156        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4157          Value *Op = I->getOperand(i);
4158          if (Constant *C = dyn_cast<Constant>(Op)) {
4159            Operands.push_back(C);
4160          } else {
4161            // If an operand is non-constant and has a non-integer,
4162            // non-pointer type, it isn't SCEVable, so don't even try to
4163            // analyze it with SCEV techniques.
4164            if (!isSCEVable(Op->getType()))
4165              return V;
4166
4167            const SCEV *OpV = getSCEVAtScope(Op, L);
4168            if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
4169              Constant *C = SC->getValue();
4170              if (C->getType() != Op->getType())
4171                C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4172                                                                  Op->getType(),
4173                                                                  false),
4174                                          C, Op->getType());
4175              Operands.push_back(C);
4176            } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
4177              if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
4178                if (C->getType() != Op->getType())
4179                  C =
4180                    ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4181                                                                  Op->getType(),
4182                                                                  false),
4183                                          C, Op->getType());
4184                Operands.push_back(C);
4185              } else
4186                return V;
4187            } else {
4188              return V;
4189            }
4190          }
4191        }
4192
4193        Constant *C;
4194        if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4195          C = ConstantFoldCompareInstOperands(CI->getPredicate(),
4196                                              Operands[0], Operands[1], TD);
4197        else
4198          C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4199                                       &Operands[0], Operands.size(), TD);
4200        return getSCEV(C);
4201      }
4202    }
4203
4204    // This is some other type of SCEVUnknown, just return it.
4205    return V;
4206  }
4207
4208  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
4209    // Avoid performing the look-up in the common case where the specified
4210    // expression has no loop-variant portions.
4211    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
4212      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4213      if (OpAtScope != Comm->getOperand(i)) {
4214        // Okay, at least one of these operands is loop variant but might be
4215        // foldable.  Build a new instance of the folded commutative expression.
4216        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
4217                                            Comm->op_begin()+i);
4218        NewOps.push_back(OpAtScope);
4219
4220        for (++i; i != e; ++i) {
4221          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4222          NewOps.push_back(OpAtScope);
4223        }
4224        if (isa<SCEVAddExpr>(Comm))
4225          return getAddExpr(NewOps);
4226        if (isa<SCEVMulExpr>(Comm))
4227          return getMulExpr(NewOps);
4228        if (isa<SCEVSMaxExpr>(Comm))
4229          return getSMaxExpr(NewOps);
4230        if (isa<SCEVUMaxExpr>(Comm))
4231          return getUMaxExpr(NewOps);
4232        llvm_unreachable("Unknown commutative SCEV type!");
4233      }
4234    }
4235    // If we got here, all operands are loop invariant.
4236    return Comm;
4237  }
4238
4239  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
4240    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
4241    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
4242    if (LHS == Div->getLHS() && RHS == Div->getRHS())
4243      return Div;   // must be loop invariant
4244    return getUDivExpr(LHS, RHS);
4245  }
4246
4247  // If this is a loop recurrence for a loop that does not contain L, then we
4248  // are dealing with the final value computed by the loop.
4249  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4250    if (!L || !AddRec->getLoop()->contains(L)) {
4251      // To evaluate this recurrence, we need to know how many times the AddRec
4252      // loop iterates.  Compute this now.
4253      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
4254      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
4255
4256      // Then, evaluate the AddRec.
4257      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
4258    }
4259    return AddRec;
4260  }
4261
4262  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
4263    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4264    if (Op == Cast->getOperand())
4265      return Cast;  // must be loop invariant
4266    return getZeroExtendExpr(Op, Cast->getType());
4267  }
4268
4269  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
4270    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4271    if (Op == Cast->getOperand())
4272      return Cast;  // must be loop invariant
4273    return getSignExtendExpr(Op, Cast->getType());
4274  }
4275
4276  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
4277    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4278    if (Op == Cast->getOperand())
4279      return Cast;  // must be loop invariant
4280    return getTruncateExpr(Op, Cast->getType());
4281  }
4282
4283  llvm_unreachable("Unknown SCEV type!");
4284  return 0;
4285}
4286
4287/// getSCEVAtScope - This is a convenience function which does
4288/// getSCEVAtScope(getSCEV(V), L).
4289const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
4290  return getSCEVAtScope(getSCEV(V), L);
4291}
4292
4293/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
4294/// following equation:
4295///
4296///     A * X = B (mod N)
4297///
4298/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
4299/// A and B isn't important.
4300///
4301/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
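///
/// A worked example (illustrative), using 4-bit values so N = 16: to solve
/// 6*X = 4 (mod 16), D = gcd(6, 16) = 2 (A has one trailing zero) and B = 4
/// is divisible by D, so a solution exists.  I = (6/2)^-1 (mod 8) = 3, and
/// X = I*(4/2) (mod 8) = 6.  Check: 6*6 = 36 = 4 (mod 16).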
4302static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
4303                                               ScalarEvolution &SE) {
4304  uint32_t BW = A.getBitWidth();
4305  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
4306  assert(A != 0 && "A must be non-zero.");
4307
4308  // 1. D = gcd(A, N)
4309  //
4310  // The gcd of A and N may have only one prime factor: 2. The number of
4311  // trailing zeros in A is its multiplicity.
4312  uint32_t Mult2 = A.countTrailingZeros();
4313  // D = 2^Mult2
4314
4315  // 2. Check if B is divisible by D.
4316  //
4317  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
4318  // is not less than multiplicity of this prime factor for D.
4319  if (B.countTrailingZeros() < Mult2)
4320    return SE.getCouldNotCompute();
4321
4322  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
4323  // modulo (N / D).
4324  //
4325  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
4326  // bit width during computations.
4327  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
4328  APInt Mod(BW + 1, 0);
4329  Mod.set(BW - Mult2);  // Mod = N / D
4330  APInt I = AD.multiplicativeInverse(Mod);
4331
4332  // 4. Compute the minimum unsigned root of the equation:
4333  // I * (B / D) mod (N / D)
4334  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
4335
4336  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
4337  // bits.
4338  return SE.getConstant(Result.trunc(BW));
4339}
4340
4341/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
4342/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
4343/// might be the same) or two SCEVCouldNotCompute objects.
4344///
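/// The chrec {L,+,M,+,N} evaluated at iteration x is
///   L + M*x + N*x*(x-1)/2 = (N/2)*x^2 + (M - N/2)*x + L,
/// which is where the coefficients A = N/2, B = M - N/2, and C = L used
/// below come from.  As an illustrative example, {-6,+,2,+,2} corresponds
/// to x^2 + x - 6, whose roots are 2 and -3; iteration 2 is indeed where
/// the chrec values (-6, -4, 0, ...) reach zero.
///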
4345static std::pair<const SCEV *,const SCEV *>
4346SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
4347  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
4348  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
4349  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
4350  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
4351
4352  // We currently can only solve this if the coefficients are constants.
4353  if (!LC || !MC || !NC) {
4354    const SCEV *CNC = SE.getCouldNotCompute();
4355    return std::make_pair(CNC, CNC);
4356  }
4357
4358  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
4359  const APInt &L = LC->getValue()->getValue();
4360  const APInt &M = MC->getValue()->getValue();
4361  const APInt &N = NC->getValue()->getValue();
4362  APInt Two(BitWidth, 2);
4363  APInt Four(BitWidth, 4);
4364
4365  {
4366    using namespace APIntOps;
4367    const APInt& C = L;
4368    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
4369    // The B coefficient is M-N/2
4370    APInt B(M);
4371    B -= sdiv(N,Two);
4372
4373    // The A coefficient is N/2
4374    APInt A(N.sdiv(Two));
4375
4376    // Compute the B^2-4ac term.
4377    APInt SqrtTerm(B);
4378    SqrtTerm *= B;
4379    SqrtTerm -= Four * (A * C);
4380
4381    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4382    // integer value or else APInt::sqrt() will assert.
4383    APInt SqrtVal(SqrtTerm.sqrt());
4384
4385    // Compute the two solutions for the quadratic formula.
4386    // The divisions must be performed as signed divisions.
4387    APInt NegB(-B);
4388    APInt TwoA(A << 1);
4389    if (TwoA.isMinValue()) {
4390      const SCEV *CNC = SE.getCouldNotCompute();
4391      return std::make_pair(CNC, CNC);
4392    }
4393
4394    LLVMContext &Context = SE.getContext();
4395
4396    ConstantInt *Solution1 =
4397      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
4398    ConstantInt *Solution2 =
4399      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
4400
4401    return std::make_pair(SE.getConstant(Solution1),
4402                          SE.getConstant(Solution2));
4403  } // end of scope using namespace APIntOps
4404}
4405
4406/// HowFarToZero - Return the number of times a backedge comparing the specified
4407/// value to zero will execute.  If not computable, return CouldNotCompute.
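///
/// For example (sketch): for the chrec {8,+,-2}<L>, the affine case below
/// solves -2*N = -8 (mod 2^BW), giving N = 4: the chrec takes the values
/// 8, 6, 4, 2 and hits zero at iteration 4.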
4408const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
4409  // If the value is a constant
4410  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4411    // If the value is already zero, the branch will execute zero times.
4412    if (C->getValue()->isZero()) return C;
4413    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4414  }
4415
4416  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4417  if (!AddRec || AddRec->getLoop() != L)
4418    return getCouldNotCompute();
4419
4420  if (AddRec->isAffine()) {
4421    // If this is an affine expression, the execution count of this branch is
4422    // the minimum unsigned root of the following equation:
4423    //
4424    //     Start + Step*N = 0 (mod 2^BW)
4425    //
4426    // equivalent to:
4427    //
4428    //             Step*N = -Start (mod 2^BW)
4429    //
4430    // where BW is the common bit width of Start and Step.
4431
4432    // Get the initial value for the loop.
4433    const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4434                                       L->getParentLoop());
4435    const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4436                                      L->getParentLoop());
4437
4438    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4439      // For now we handle only constant steps.
4440
4441      // First, handle unitary steps.
4442      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
4443        return getNegativeSCEV(Start);          //   N = -Start (as unsigned)
4444      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
4445        return Start;                           //    N = Start (as unsigned)
4446
4447      // Then, try to solve the above equation provided that Start is constant.
4448      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4449        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4450                                            -StartC->getValue()->getValue(),
4451                                            *this);
4452    }
4453  } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
4454    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4455    // the quadratic equation to solve it.
4456    std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
4457                                                                    *this);
4458    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4459    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4460    if (R1) {
4461#if 0
4462      dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
4463             << "  sol#2: " << *R2 << "\n";
4464#endif
4465      // Pick the smallest positive root value.
4466      if (ConstantInt *CB =
4467          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
4468                                   R1->getValue(), R2->getValue()))) {
4469        if (!CB->getZExtValue())
4470          std::swap(R1, R2);   // R1 is the minimum root now.
4471
4472        // We can only use this value if the chrec ends up with an exact zero
4473        // value at this index.  When solving for "X*X != 5", for example, we
4474        // should not accept a root of 2.
4475        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4476        if (Val->isZero())
4477          return R1;  // We found a quadratic root!
4478      }
4479    }
4480  }
4481
4482  return getCouldNotCompute();
4483}
4484
4485/// HowFarToNonZero - Return the number of times a backedge checking the
4486/// specified value for nonzero will execute.  If not computable, return
4487/// CouldNotCompute.
4488const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4489  // Loops that look like: while (X == 0) are very strange indeed.  We don't
4490  // handle them yet except for the trivial case.  This could be expanded in the
4491  // future as needed.
4492
4493  // If the value is a constant, check to see if it is known to be non-zero
4494  // already.  If so, the backedge will execute zero times.
4495  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4496    if (!C->getValue()->isNullValue())
4497      return getIntegerSCEV(0, C->getType());
4498    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4499  }
4500
4501  // We could implement others, but I really doubt anyone writes loops like
4502  // this, and if they did, they would already be constant folded.
4503  return getCouldNotCompute();
4504}
4505
4506/// getLoopPredecessor - If the given loop's header has exactly one unique
4507/// predecessor outside the loop, return it. Otherwise return null.
4508///
4509BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
4510  BasicBlock *Header = L->getHeader();
4511  BasicBlock *Pred = 0;
4512  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
4513       PI != E; ++PI)
4514    if (!L->contains(*PI)) {
4515      if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4516      Pred = *PI;
4517    }
4518  return Pred;
4519}
4520
4521/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4522/// (which may not be an immediate predecessor) which has exactly one
4523/// successor from which BB is reachable, or null if no such block is
4524/// found.
4525///
4526BasicBlock *
4527ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4528  // If the block has a unique predecessor, then there is no path from the
4529  // predecessor to the block that does not go through the direct edge
4530  // from the predecessor to the block.
4531  if (BasicBlock *Pred = BB->getSinglePredecessor())
4532    return Pred;
4533
4534  // A loop's header is defined to be a block that dominates the loop.
4535  // If the header has a unique predecessor outside the loop, it must be
4536  // a block that has exactly one successor that can reach the loop.
4537  if (Loop *L = LI->getLoopFor(BB))
4538    return getLoopPredecessor(L);
4539
4540  return 0;
4541}
4542
4543/// HasSameValue - SCEV structural equivalence is usually sufficient for
4544/// testing whether two expressions are equal, however for the purposes of
4545/// looking for a condition guarding a loop, it can be useful to be a little
4546/// more general, since a front-end may have replicated the controlling
4547/// expression.
4548///
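/// For example (sketch): if A and B wrap two distinct but identical
/// 'add i32 %x, 1' instructions that do not read memory, they must produce
/// the same value even though they are different SCEVUnknowns.
///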
4549static bool HasSameValue(const SCEV *A, const SCEV *B) {
4550  // Quick check to see if they are the same SCEV.
4551  if (A == B) return true;
4552
4553  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4554  // two different instructions with the same value. Check for this case.
4555  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4556    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4557      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4558        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4559          if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
4560            return true;
4561
4562  // Otherwise assume they may have a different value.
4563  return false;
4564}
4565
4566bool ScalarEvolution::isKnownNegative(const SCEV *S) {
4567  return getSignedRange(S).getSignedMax().isNegative();
4568}
4569
4570bool ScalarEvolution::isKnownPositive(const SCEV *S) {
4571  return getSignedRange(S).getSignedMin().isStrictlyPositive();
4572}
4573
4574bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
4575  return !getSignedRange(S).getSignedMin().isNegative();
4576}
4577
4578bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
4579  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
4580}
4581
4582bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
4583  return isKnownNegative(S) || isKnownPositive(S);
4584}
4585
4586bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
4587                                       const SCEV *LHS, const SCEV *RHS) {
4588
4589  if (HasSameValue(LHS, RHS))
4590    return ICmpInst::isTrueWhenEqual(Pred);
4591
4592  switch (Pred) {
4593  default:
4594    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4595    break;
4596  case ICmpInst::ICMP_SGT:
4597    Pred = ICmpInst::ICMP_SLT;
4598    std::swap(LHS, RHS);
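    // FALL THROUGH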
4599  case ICmpInst::ICMP_SLT: {
4600    ConstantRange LHSRange = getSignedRange(LHS);
4601    ConstantRange RHSRange = getSignedRange(RHS);
4602    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
4603      return true;
4604    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
4605      return false;
4606    break;
4607  }
4608  case ICmpInst::ICMP_SGE:
4609    Pred = ICmpInst::ICMP_SLE;
4610    std::swap(LHS, RHS);
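    // FALL THROUGH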
4611  case ICmpInst::ICMP_SLE: {
4612    ConstantRange LHSRange = getSignedRange(LHS);
4613    ConstantRange RHSRange = getSignedRange(RHS);
4614    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
4615      return true;
4616    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
4617      return false;
4618    break;
4619  }
4620  case ICmpInst::ICMP_UGT:
4621    Pred = ICmpInst::ICMP_ULT;
4622    std::swap(LHS, RHS);
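    // FALL THROUGH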
4623  case ICmpInst::ICMP_ULT: {
4624    ConstantRange LHSRange = getUnsignedRange(LHS);
4625    ConstantRange RHSRange = getUnsignedRange(RHS);
4626    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
4627      return true;
4628    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
4629      return false;
4630    break;
4631  }
4632  case ICmpInst::ICMP_UGE:
4633    Pred = ICmpInst::ICMP_ULE;
4634    std::swap(LHS, RHS);
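    // FALL THROUGH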
4635  case ICmpInst::ICMP_ULE: {
4636    ConstantRange LHSRange = getUnsignedRange(LHS);
4637    ConstantRange RHSRange = getUnsignedRange(RHS);
4638    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
4639      return true;
4640    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
4641      return false;
4642    break;
4643  }
4644  case ICmpInst::ICMP_NE: {
4645    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
4646      return true;
4647    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
4648      return true;
4649
4650    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4651    if (isKnownNonZero(Diff))
4652      return true;
4653    break;
4654  }
4655  case ICmpInst::ICMP_EQ:
4656    // The check at the top of the function catches the case where
4657    // the values are known to be equal.
4658    break;
4659  }
4660  return false;
4661}
4662
4663/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
4664/// protected by a conditional between LHS and RHS.  This is used to
4665/// eliminate casts.
4666bool
4667ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
4668                                             ICmpInst::Predicate Pred,
4669                                             const SCEV *LHS, const SCEV *RHS) {
4670  // Interpret a null as meaning no loop, where there is obviously no guard
4671  // (interprocedural conditions notwithstanding).
4672  if (!L) return true;
4673
4674  BasicBlock *Latch = L->getLoopLatch();
4675  if (!Latch)
4676    return false;
4677
4678  BranchInst *LoopContinuePredicate =
4679    dyn_cast<BranchInst>(Latch->getTerminator());
4680  if (!LoopContinuePredicate ||
4681      LoopContinuePredicate->isUnconditional())
4682    return false;
4683
4684  return isImpliedCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
4685                       LoopContinuePredicate->getSuccessor(0) != L->getHeader());
4686}
4687
4688/// isLoopGuardedByCond - Test whether entry to the loop is protected
4689/// by a conditional between LHS and RHS.  This is used to help avoid max
4690/// expressions in loop trip counts, and to eliminate casts.
4691bool
4692ScalarEvolution::isLoopGuardedByCond(const Loop *L,
4693                                     ICmpInst::Predicate Pred,
4694                                     const SCEV *LHS, const SCEV *RHS) {
4695  // Interpret a null as meaning no loop, where there is obviously no guard
4696  // (interprocedural conditions notwithstanding).
4697  if (!L) return false;
4698
4699  BasicBlock *Predecessor = getLoopPredecessor(L);
4700  BasicBlock *PredecessorDest = L->getHeader();
4701
4702  // Starting at the loop predecessor, climb up the predecessor chain, as long
4703  // as there are predecessors that can be found that have unique successors
4704  // leading to the original header.
4705  for (; Predecessor;
4706       PredecessorDest = Predecessor,
4707       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
4708
4709    BranchInst *LoopEntryPredicate =
4710      dyn_cast<BranchInst>(Predecessor->getTerminator());
4711    if (!LoopEntryPredicate ||
4712        LoopEntryPredicate->isUnconditional())
4713      continue;
4714
4715    if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
4716                      LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
4717      return true;
4718  }
4719
4720  return false;
4721}
4722
4723/// isImpliedCond - Test whether the condition described by Pred, LHS,
4724/// and RHS is true whenever the given Cond value evaluates to true.
4725bool ScalarEvolution::isImpliedCond(Value *CondValue,
4726                                    ICmpInst::Predicate Pred,
4727                                    const SCEV *LHS, const SCEV *RHS,
4728                                    bool Inverse) {
4729  // Recursively handle And and Or conditions.
4730  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
4731    if (BO->getOpcode() == Instruction::And) {
4732      if (!Inverse)
4733        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4734               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4735    } else if (BO->getOpcode() == Instruction::Or) {
4736      if (Inverse)
4737        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4738               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4739    }
4740  }
4741
4742  ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
4743  if (!ICI) return false;
4744
4745  // Bail if the ICmp's operands' types are wider than the needed type
4746  // before attempting to call getSCEV on them. This avoids infinite
4747  // recursion, since the analysis of widening casts can require loop
4748  // exit condition information for overflow checking, which would
4749  // lead back here.
4750  if (getTypeSizeInBits(LHS->getType()) <
4751      getTypeSizeInBits(ICI->getOperand(0)->getType()))
4752    return false;
4753
4754  // Now that we found a conditional branch that dominates the loop, check to
4755  // see if it is the comparison we are looking for.
4756  ICmpInst::Predicate FoundPred;
4757  if (Inverse)
4758    FoundPred = ICI->getInversePredicate();
4759  else
4760    FoundPred = ICI->getPredicate();
4761
4762  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
4763  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
4764
4765  // Balance the types. The case where FoundLHS' type is wider than
4766  // LHS' type is checked for above.
4767  if (getTypeSizeInBits(LHS->getType()) >
4768      getTypeSizeInBits(FoundLHS->getType())) {
4769    if (CmpInst::isSigned(Pred)) {
4770      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
4771      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
4772    } else {
4773      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
4774      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
4775    }
4776  }
4777
4778  // Canonicalize the query to match the way instcombine will have
4779  // canonicalized the comparison.
4780  // First, put a constant operand on the right.
4781  if (isa<SCEVConstant>(LHS)) {
4782    std::swap(LHS, RHS);
4783    Pred = ICmpInst::getSwappedPredicate(Pred);
4784  }
4785  // Then, canonicalize comparisons with boundary cases.
4786  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
4787    const APInt &RA = RC->getValue()->getValue();
4788    switch (Pred) {
4789    default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4790    case ICmpInst::ICMP_EQ:
4791    case ICmpInst::ICMP_NE:
4792      break;
4793    case ICmpInst::ICMP_UGE:
4794      if ((RA - 1).isMinValue()) {
4795        Pred = ICmpInst::ICMP_NE;
4796        RHS = getConstant(RA - 1);
4797        break;
4798      }
4799      if (RA.isMaxValue()) {
4800        Pred = ICmpInst::ICMP_EQ;
4801        break;
4802      }
4803      if (RA.isMinValue()) return true;
4804      break;
4805    case ICmpInst::ICMP_ULE:
4806      if ((RA + 1).isMaxValue()) {
4807        Pred = ICmpInst::ICMP_NE;
4808        RHS = getConstant(RA + 1);
4809        break;
4810      }
4811      if (RA.isMinValue()) {
4812        Pred = ICmpInst::ICMP_EQ;
4813        break;
4814      }
4815      if (RA.isMaxValue()) return true;
4816      break;
4817    case ICmpInst::ICMP_SGE:
4818      if ((RA - 1).isMinSignedValue()) {
4819        Pred = ICmpInst::ICMP_NE;
4820        RHS = getConstant(RA - 1);
4821        break;
4822      }
4823      if (RA.isMaxSignedValue()) {
4824        Pred = ICmpInst::ICMP_EQ;
4825        break;
4826      }
4827      if (RA.isMinSignedValue()) return true;
4828      break;
4829    case ICmpInst::ICMP_SLE:
4830      if ((RA + 1).isMaxSignedValue()) {
4831        Pred = ICmpInst::ICMP_NE;
4832        RHS = getConstant(RA + 1);
4833        break;
4834      }
4835      if (RA.isMinSignedValue()) {
4836        Pred = ICmpInst::ICMP_EQ;
4837        break;
4838      }
4839      if (RA.isMaxSignedValue()) return true;
4840      break;
4841    case ICmpInst::ICMP_UGT:
4842      if (RA.isMinValue()) {
4843        Pred = ICmpInst::ICMP_NE;
4844        break;
4845      }
4846      if ((RA + 1).isMaxValue()) {
4847        Pred = ICmpInst::ICMP_EQ;
4848        RHS = getConstant(RA + 1);
4849        break;
4850      }
4851      if (RA.isMaxValue()) return false;
4852      break;
4853    case ICmpInst::ICMP_ULT:
4854      if (RA.isMaxValue()) {
4855        Pred = ICmpInst::ICMP_NE;
4856        break;
4857      }
4858      if ((RA - 1).isMinValue()) {
4859        Pred = ICmpInst::ICMP_EQ;
4860        RHS = getConstant(RA - 1);
4861        break;
4862      }
4863      if (RA.isMinValue()) return false;
4864      break;
4865    case ICmpInst::ICMP_SGT:
4866      if (RA.isMinSignedValue()) {
4867        Pred = ICmpInst::ICMP_NE;
4868        break;
4869      }
4870      if ((RA + 1).isMaxSignedValue()) {
4871        Pred = ICmpInst::ICMP_EQ;
4872        RHS = getConstant(RA + 1);
4873        break;
4874      }
4875      if (RA.isMaxSignedValue()) return false;
4876      break;
4877    case ICmpInst::ICMP_SLT:
4878      if (RA.isMaxSignedValue()) {
4879        Pred = ICmpInst::ICMP_NE;
4880        break;
4881      }
4882      if ((RA - 1).isMinSignedValue()) {
4883        Pred = ICmpInst::ICMP_EQ;
4884        RHS = getConstant(RA - 1);
4885        break;
4886      }
4887      if (RA.isMinSignedValue()) return false;
4888      break;
4889    }
4890  }
4891
4892  // Check to see if we can make the LHS or RHS match.
4893  if (LHS == FoundRHS || RHS == FoundLHS) {
4894    if (isa<SCEVConstant>(RHS)) {
4895      std::swap(FoundLHS, FoundRHS);
4896      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
4897    } else {
4898      std::swap(LHS, RHS);
4899      Pred = ICmpInst::getSwappedPredicate(Pred);
4900    }
4901  }
4902
4903  // Check whether the found predicate is the same as the desired predicate.
4904  if (FoundPred == Pred)
4905    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
4906
4907  // Check whether swapping the found predicate makes it the same as the
4908  // desired predicate.
4909  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
4910    if (isa<SCEVConstant>(RHS))
4911      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
4912    else
4913      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
4914                                   RHS, LHS, FoundLHS, FoundRHS);
4915  }
4916
4917  // Check whether the found condition is stronger than the one we need.
4918  if (FoundPred == ICmpInst::ICMP_EQ)
4919    if (ICmpInst::isTrueWhenEqual(Pred))
4920      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
4921        return true;
4922  if (Pred == ICmpInst::ICMP_NE)
4923    if (!ICmpInst::isTrueWhenEqual(FoundPred))
4924      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
4925        return true;
4926
4927  // Otherwise assume the worst.
4928  return false;
4929}
4930
4931/// isImpliedCondOperands - Test whether the condition described by Pred,
4932/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
4933/// and FoundRHS is true.
4934bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
4935                                            const SCEV *LHS, const SCEV *RHS,
4936                                            const SCEV *FoundLHS,
4937                                            const SCEV *FoundRHS) {
4938  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
4939                                     FoundLHS, FoundRHS) ||
4940         // ~x < ~y --> x > y
4941         isImpliedCondOperandsHelper(Pred, LHS, RHS,
4942                                     getNotSCEV(FoundRHS),
4943                                     getNotSCEV(FoundLHS));
4944}
4945
4946/// isImpliedCondOperandsHelper - Test whether the condition described by
4947/// Pred, LHS, and RHS is true whenever the condition described by Pred,
4948/// FoundLHS, and FoundRHS is true.
4949bool
4950ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
4951                                             const SCEV *LHS, const SCEV *RHS,
4952                                             const SCEV *FoundLHS,
4953                                             const SCEV *FoundRHS) {
4954  switch (Pred) {
4955  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4956  case ICmpInst::ICMP_EQ:
4957  case ICmpInst::ICMP_NE:
4958    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
4959      return true;
4960    break;
4961  case ICmpInst::ICMP_SLT:
4962  case ICmpInst::ICMP_SLE:
4963    if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
4964        isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
4965      return true;
4966    break;
4967  case ICmpInst::ICMP_SGT:
4968  case ICmpInst::ICMP_SGE:
4969    if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
4970        isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
4971      return true;
4972    break;
4973  case ICmpInst::ICMP_ULT:
4974  case ICmpInst::ICMP_ULE:
4975    if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
4976        isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
4977      return true;
4978    break;
4979  case ICmpInst::ICMP_UGT:
4980  case ICmpInst::ICMP_UGE:
4981    if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
4982        isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
4983      return true;
4984    break;
4985  }
4986
4987  return false;
4988}
4989
4990/// getBECount - Subtract the end and start values and divide by the step,
4991/// rounding up, to get the number of times the backedge is executed. Return
4992/// CouldNotCompute if an intermediate computation overflows.
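///
/// For example (sketch): Start = 0, End = 10, Step = 3 gives
/// (10 - 0 + (3 - 1)) /u 3 = 4, matching a loop whose induction variable
/// takes the values 0, 3, 6, 9 and exits upon reaching 12.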
4993const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
4994                                        const SCEV *End,
4995                                        const SCEV *Step,
4996                                        bool NoWrap) {
4997  assert(!isKnownNegative(Step) &&
4998         "This code doesn't handle negative strides yet!");
4999
5000  const Type *Ty = Start->getType();
5001  const SCEV *NegOne = getIntegerSCEV(-1, Ty);
5002  const SCEV *Diff = getMinusSCEV(End, Start);
5003  const SCEV *RoundUp = getAddExpr(Step, NegOne);
5004
5005  // Add an adjustment to the difference between End and Start so that
5006  // the division will effectively round up.
5007  const SCEV *Add = getAddExpr(Diff, RoundUp);
5008
5009  if (!NoWrap) {
5010    // Check Add for unsigned overflow.
5011    // TODO: More sophisticated things could be done here.
5012    const Type *WideTy = IntegerType::get(getContext(),
5013                                          getTypeSizeInBits(Ty) + 1);
5014    const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
5015    const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
5016    const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
5017    if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
5018      return getCouldNotCompute();
5019  }
5020
5021  return getUDivExpr(Add, Step);
5022}
5023
5024/// HowManyLessThans - Return the number of times a backedge containing the
5025/// specified less-than comparison will execute.  If not computable, return
5026/// CouldNotCompute.
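///
/// For example (sketch): for a loop "for (i = n; i < m; i += s)" with
/// positive s, the code below computes (End - n + (s - 1)) /u s, where End
/// is m when a loop guard proves the loop is entered and smax/umax(m, n)
/// otherwise.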
5027ScalarEvolution::BackedgeTakenInfo
5028ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
5029                                  const Loop *L, bool isSigned) {
5030  // Only handle:  "ADDREC < LoopInvariant".
5031  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
5032
5033  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
5034  if (!AddRec || AddRec->getLoop() != L)
5035    return getCouldNotCompute();
5036
5037  // Check to see if we have a flag which makes analysis easy.
5038  bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() :
5039                           AddRec->hasNoUnsignedWrap();
5040
5041  if (AddRec->isAffine()) {
5042    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
5043    const SCEV *Step = AddRec->getStepRecurrence(*this);
5044
5045    if (Step->isZero())
5046      return getCouldNotCompute();
5047    if (Step->isOne()) {
5048      // With unit stride, the iteration never steps past the limit value.
5049    } else if (isKnownPositive(Step)) {
5050      // Test whether a positive iteration can step past the limit
5051      // value and past the maximum value for its type in a single step.
5052      // Note that it's not sufficient to check NoWrap here, because even
5053      // though the value after a wrap is undefined, it's not undefined
5054      // behavior, so if wrap does occur, the loop could either terminate or
5055      // loop infinitely, but in either case, the loop is guaranteed to
5056      // iterate at least until the iteration where the wrapping occurs.
5057      const SCEV *One = getIntegerSCEV(1, Step->getType());
5058      if (isSigned) {
5059        APInt Max = APInt::getSignedMaxValue(BitWidth);
5060        if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
5061              .slt(getSignedRange(RHS).getSignedMax()))
5062          return getCouldNotCompute();
5063      } else {
5064        APInt Max = APInt::getMaxValue(BitWidth);
5065        if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
5066              .ult(getUnsignedRange(RHS).getUnsignedMax()))
5067          return getCouldNotCompute();
5068      }
5069    } else
5070      // TODO: Handle negative strides here and below.
5071      return getCouldNotCompute();
5072
5073    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
5074    // m.  So, we count the number of iterations in which {n,+,s} < m is true.
5075    // Note that we cannot simply return max(m-n,0)/s because it's not safe to
5076    // treat m-n as signed nor unsigned due to overflow possibility.
5077
5078    // First, we get the value of the LHS in the first iteration: n
5079    const SCEV *Start = AddRec->getOperand(0);
5080
5081    // Determine the minimum constant start value.
5082    const SCEV *MinStart = getConstant(isSigned ?
5083      getSignedRange(Start).getSignedMin() :
5084      getUnsignedRange(Start).getUnsignedMin());
5085
5086    // If we know that the condition is true in order to enter the loop,
5087    // then we know that it will run exactly (m-n)/s times. Otherwise, we
5088    // only know that it will execute (max(m,n)-n)/s times. In both cases,
5089    // the division must round up.
5090    const SCEV *End = RHS;
5091    if (!isLoopGuardedByCond(L,
5092                             isSigned ? ICmpInst::ICMP_SLT :
5093                                        ICmpInst::ICMP_ULT,
5094                             getMinusSCEV(Start, Step), RHS))
5095      End = isSigned ? getSMaxExpr(RHS, Start)
5096                     : getUMaxExpr(RHS, Start);
5097
5098    // Determine the maximum constant end value.
5099    const SCEV *MaxEnd = getConstant(isSigned ?
5100      getSignedRange(End).getSignedMax() :
5101      getUnsignedRange(End).getUnsignedMax());
5102
5103    // If MaxEnd is within a step of the maximum integer value in its type,
5104    // adjust it down to the minimum value which would produce the same effect.
5105    // This allows the subsequent ceiling division of (N+(step-1))/step to
5106    // compute the correct value.
    const SCEV *StepMinusOne = getMinusSCEV(Step,
                                            getIntegerSCEV(1, Step->getType()));
    MaxEnd = isSigned ?
      getSMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
                               StepMinusOne)) :
      getUMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
                               StepMinusOne));

    // Finally, we subtract these two values and divide, rounding up, to get
    // the number of times the backedge is executed.
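    // That is, ceil((End - Start) / Step): e.g. with Start == 0, End == 10,
    // and Step == 3, the backedge executes ceil(10/3) == 4 times.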
    const SCEV *BECount = getBECount(Start, End, Step, NoWrap);

    // The maximum backedge count is similar, except using the minimum start
    // value and the maximum end value.
    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap);

    return BackedgeTakenInfo(BECount, MaxBECount);
  }

  return getCouldNotCompute();
}

/// getNumIterationsInRange - Return the number of iterations of this loop that
/// produce values in the specified constant range.  Another way of looking at
/// this is that it returns the first iteration number where the value is not
/// in the range, thus computing the exit count.  If the iteration count can't
/// be computed, an instance of SCEVCouldNotCompute is returned.
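///
/// For example, for {0,+,2} and the range [0,10), iterations 0 through 4
/// produce 0, 2, 4, 6, and 8, and iteration 5 produces 10, the first value
/// outside the range, so the result is 5.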
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
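  // For example, {5,+,1} stays inside [5,15) for exactly as many iterations
  // as {0,+,1} stays inside [0,10).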
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
      if (const SCEVAddRecExpr *ShiftedAddRec =
            dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
                           Range.subtract(SC->getValue()->getValue()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!isa<SCEVConstant>(getOperand(i)))
      return SE.getCouldNotCompute();

  // Okay, at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getIntegerSCEV(0, getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt One(BitWidth,1);
    APInt A     = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();

    // The exit value should be (End+A)/A.
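    // For example, solving {0,+,2} in [0,10): End == 10-1 == 9 and
    // ExitVal == (9+2) /u 2 == 5; iteration 5 produces 10, which is outside
    // the range, while iteration 4 produces 8, which is inside it.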
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
    // quadratic equation to solve it.  To do this, we must frame our problem in
    // terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
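    // For example, {0,+,1,+,2} evaluates to i^2 at iteration i; to determine
    // when it leaves [0,5), we instead solve {-5,+,1,+,2}, i.e. i^2-5 == 0,
    // and then verify the candidate root below.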
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());

    // Next, solve the constructed addrec
    std::pair<const SCEV *,const SCEV *> Roots =
      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1) {
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
                         R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2);   // R1 is the minimum root now.

        // Make sure the root is not off by one.  The returned iteration should
        // not be in the range, but the previous one should be.  When solving
        // for "X*X < 5", for example, we should not return a root of 2.
        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
                                                             R1->getValue(),
                                                             SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal =
                ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value.  Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
               ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}

//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->Scalars.erase(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
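  // For example, if the old value feeds a (hypothetical) instruction
  // %add = add i32 %old, 1, the cached SCEV for %add and, transitively, for
  // %add's users is dropped so it can be recomputed from the new value.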
  SmallVector<User *, 16> Worklist;
  SmallPtrSet<User *, 8> Visited;
  Value *Old = getValPtr();
  bool DeleteOld = false;
  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
       UI != UE; ++UI)
    Worklist.push_back(*UI);
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old) {
      DeleteOld = true;
      continue;
    }
    if (!Visited.insert(U))
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->Scalars.erase(U);
    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
         UI != UE; ++UI)
      Worklist.push_back(*UI);
  }
  // Erase the Old value's own cached info if it (indirectly) references
  // itself.
  if (DeleteOld) {
    if (PHINode *PN = dyn_cast<PHINode>(Old))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->Scalars.erase(Old);
    // this now dangles!
  }
  // this may dangle!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution()
  : FunctionPass(&ID) {
}

bool ScalarEvolution::runOnFunction(Function &F) {
  this->F = &F;
  LI = &getAnalysis<LoopInfo>();
  DT = &getAnalysis<DominatorTree>();
  TD = getAnalysisIfAvailable<TargetData>();
  return false;
}

void ScalarEvolution::releaseMemory() {
  Scalars.clear();
  BackedgeTakenCounts.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValuesAtScopes.clear();
  UniqueSCEVs.clear();
  SCEVAllocator.Reset();
}

void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<LoopInfo>();
  AU.addRequiredTransitive<DominatorTree>();
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    PrintLoopInfo(OS, SE, *I);

  OS << "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n";
}

void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    if (isSCEVable(I->getType())) {
      OS << *I << '\n';
      OS << "  -->  ";
      const SCEV *SV = SE.getSCEV(&*I);
      SV->print(OS);

      const Loop *L = LI->getLoopFor((*I).getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << "  -->  ";
        AtUse->print(OS);
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!ExitValue->isLoopInvariant(L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
    PrintLoopInfo(OS, &SE, *I);
}
