ScalarEvolution.cpp revision b64cf896f8c6648b2c2bce15107d2892b909f367
//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
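//
// For example, the canonical induction variable of a loop L, which starts
// at zero and increments by one on each iteration, is represented directly
// as the polynomial recurrence {0,+,1}<L>, using the {start,+,step} notation
// that SCEVAddRecExpr::print emits.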
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
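// As a rough sketch (not a complete example), a client pass typically
// drives this analysis through the public ScalarEvolution API:
//
//   ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
//   const SCEV *S = SE.getSCEV(V);                  // analyze a Value
//   const SCEV *BEC = SE.getBackedgeTakenCount(L);  // analyze a Loop
//   if (!isa<SCEVCouldNotCompute>(BEC))
//     ...;  // the loop's backedge-taken count is known here
//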
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(
    ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->properlyDominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(NumOperands > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = NumOperands; i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->properlyDominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

bool
SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::dominates(BB, DT);
}

bool
SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  // This uses a "dominates" query instead of "properly dominates" query because
  // the instruction which produces the addrec's value is a PHI, and a PHI
  // effectively properly dominates its entire containing block.
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::properlyDominates(BB, DT);
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = NumOperands; i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ">";
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I);
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->properlyDominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

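// isSizeOf - Check whether this SCEVUnknown is the canonical
// target-independent spelling of sizeof(Ty): a ptrtoint of a getelementptr
// that indexes one element past a null Ty*, i.e.
//   ptrtoint (Ty* getelementptr (Ty* null, i32 1) to iN)
// If so, set AllocTy to Ty and return true.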
bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}

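// isAlignOf - Check whether this SCEVUnknown is the canonical
// target-independent spelling of alignof(Ty): a ptrtoint of a getelementptr
// to field 1 of a null, non-packed { i1, Ty }*, i.e.
//   ptrtoint (Ty* getelementptr ({ i1, Ty }* null, i32 0, i32 1) to iN)
// The offset of the second field is exactly the alignment of Ty. If the
// pattern matches, set AllocTy to Ty and return true.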
bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (const StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

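// isOffsetOf - Check whether this SCEVUnknown is the canonical
// target-independent spelling of offsetof(CTy, FieldNo): a ptrtoint of a
// getelementptr to a field of a null CTy*, i.e.
//   ptrtoint (Ty* getelementptr (CTy* null, i32 0, <FieldNo>) to iN)
// If so, set CTy and FieldNo and return true.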
bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

void SCEVUnknown::print(raw_ostream &OS) const {
  const Type *AllocTy;
  if (isSizeOf(AllocTy)) {
    OS << "sizeof(" << *AllocTy << ")";
    return;
  }
  if (isAlignOf(AllocTy)) {
    OS << "alignof(" << *AllocTy << ")";
    return;
  }

  const Type *CTy;
  Constant *FieldNo;
  if (isOffsetOf(CTy, FieldNo)) {
    OS << "offsetof(" << *CTy << ", ";
    WriteAsOperand(OS, FieldNo, false);
    OS << ")";
    return;
  }

  // Otherwise just print it normally.
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

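// CompareTypes - Define a total, deterministic ordering on Types: return
// true if A is ordered before B. The ordering depends only on type
// structure, not on pointer addresses, so results are reproducible from
// run to run.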
static bool CompareTypes(const Type *A, const Type *B) {
  if (A->getTypeID() != B->getTypeID())
    return A->getTypeID() < B->getTypeID();
  if (const IntegerType *AI = dyn_cast<IntegerType>(A)) {
    const IntegerType *BI = cast<IntegerType>(B);
    return AI->getBitWidth() < BI->getBitWidth();
  }
  if (const PointerType *AI = dyn_cast<PointerType>(A)) {
    const PointerType *BI = cast<PointerType>(B);
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const ArrayType *AI = dyn_cast<ArrayType>(A)) {
    const ArrayType *BI = cast<ArrayType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const VectorType *AI = dyn_cast<VectorType>(A)) {
    const VectorType *BI = cast<VectorType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const StructType *AI = dyn_cast<StructType>(A)) {
    const StructType *BI = cast<StructType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    for (unsigned i = 0, e = AI->getNumElements(); i != e; ++i)
      if (CompareTypes(AI->getElementType(i), BI->getElementType(i)) ||
          CompareTypes(BI->getElementType(i), AI->getElementType(i)))
        return CompareTypes(AI->getElementType(i), BI->getElementType(i));
  }
  return false;
}

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return false;

      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (LU->getType()->isPointerTy() && !RU->getType()->isPointerTy())
          return false;
        if (RU->getType()->isPointerTy() && !LU->getType()->isPointerTy())
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      llvm_unreachable("Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
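/// For example, given the operand list (x, y, x), sorting brings the two
/// occurrences of x together, so a caller such as getAddExpr can fold the
/// adjacent duplicates into x*2 with a simple linear scan.
///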
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

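  // For a concrete sense of the arithmetic, consider K = 3 with W = 8:
  // K! = 6, so T = 1 and K!/2^T = 3.  The product It*(It-1)*(It-2) is formed
  // at W+T = 9 bits, shifted right by T = 1, truncated to W bits, and then
  // multiplied by 171, the multiplicative inverse of 3 modulo 2^8 (since
  // 3 * 171 = 513 = 2*256 + 1).  For It = 4 this yields (4*3*2 / 2) * 171
  // = 2052 = 4 (mod 256), which is indeed BC(4, 3).
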
  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                      CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
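/// For example, the chrec {0,+,1,+,1} evaluates at iteration It to
/// 0*BC(It,0) + 1*BC(It,1) + 1*BC(It,2) = It + It*(It-1)/2 = It*(It+1)/2,
/// the It'th triangular number.
///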
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) &&
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) ||
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(getSignExtendExpr(Start, Ty),
                             getSignExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
          Add = getAddExpr(Start, UMul);
          OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                      getSignedRange(Step).getSignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      Ops.push_back(getAnyExtendExpr(*I, Ty));
    return getAddRecExpr(Ops, AR->getLoop());
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, and update the
/// given map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = NumOperands; i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       Add->op_begin(), Add->getNumOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
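/// For example, identical operands are combined into a multiply (X + Y + Y
/// becomes X + Y*2), constant operands are folded together, and a constant
/// zero operand is dropped entirely.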
1271const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
1272                                        bool HasNUW, bool HasNSW) {
1273  assert(!Ops.empty() && "Cannot get empty add!");
1274  if (Ops.size() == 1) return Ops[0];
1275#ifndef NDEBUG
1276  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1277    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1278           getEffectiveSCEVType(Ops[0]->getType()) &&
1279           "SCEVAddExpr operand types don't match!");
1280#endif
1281
1282  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
1283  if (!HasNUW && HasNSW) {
1284    bool All = true;
1285    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1286      if (!isKnownNonNegative(Ops[i])) {
1287        All = false;
1288        break;
1289      }
1290    if (All) HasNUW = true;
1291  }
1292
1293  // Sort by complexity, this groups all similar expression types together.
1294  GroupByComplexity(Ops, LI);
1295
1296  // If there are any constants, fold them together.
1297  unsigned Idx = 0;
1298  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1299    ++Idx;
1300    assert(Idx < Ops.size());
1301    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1302      // We found two constants, fold them together!
1303      Ops[0] = getConstant(LHSC->getValue()->getValue() +
1304                           RHSC->getValue()->getValue());
1305      if (Ops.size() == 2) return Ops[0];
1306      Ops.erase(Ops.begin()+1);  // Erase the folded element
1307      LHSC = cast<SCEVConstant>(Ops[0]);
1308    }
1309
1310    // If we are left with a constant zero being added, strip it off.
1311    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1312      Ops.erase(Ops.begin());
1313      --Idx;
1314    }
1315  }
1316
1317  if (Ops.size() == 1) return Ops[0];
1318
1319  // Okay, check to see if the same value occurs in the operand list twice.  If
1320  // so, merge them together into an multiply expression.  Since we sorted the
1321  // list, these values are required to be adjacent.
1322  const Type *Ty = Ops[0]->getType();
1323  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1324    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
1325      // Found a match, merge the two values into a multiply, and add any
1326      // remaining values to the result.
1327      const SCEV *Two = getIntegerSCEV(2, Ty);
1328      const SCEV *Mul = getMulExpr(Ops[i], Two);
1329      if (Ops.size() == 2)
1330        return Mul;
1331      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
1332      Ops.push_back(Mul);
1333      return getAddExpr(Ops, HasNUW, HasNSW);
1334    }

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands, they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expressions until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
        if (I->first != 0)
          Ops.push_back(getMulExpr(getConstant(I->first),
                                   getAddExpr(I->second)));
      if (Ops.empty())
        return getIntegerSCEV(0, Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          // Start with the multiply's other operand: operand 1 if MulOp is
          // 0, otherwise operand 0.
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
            MulOps.erase(MulOps.begin()+MulOp);
            InnerMul = getMulExpr(MulOps);
          }
          const SCEV *One = getIntegerSCEV(1, Ty);
          const SCEV *AddOne = getAddExpr(InnerMul, One);
          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_end());
              MulOps.erase(MulOps.begin()+MulOp);
              InnerMul1 = getMulExpr(MulOps);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_end());
              MulOps.erase(MulOps.begin()+OMulOp);
              InnerMul2 = getMulExpr(MulOps);
            }
            const SCEV *InnerMulSum = getAddExpr(InnerMul1, InnerMul2);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      AddRecOps[0] = getAddExpr(LIOps);

      // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition
      // is not associative, so this isn't necessarily safe.
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the non-loop-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
                                              AddRec->op_end());
          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
            if (i >= NewOps.size()) {
              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
                            OtherAddRec->op_end());
              break;
            }
            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
          }
          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());

          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getAddExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  SCEVAddExpr *S =
    static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}
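
// Illustrative sketch (compiled out): two of the getAddExpr folds above in
// action. SE, X, and Ty are placeholder names for a ScalarEvolution
// instance, an arbitrary SCEV, and its integer type.
#if 0
static void exampleGetAddExpr(ScalarEvolution &SE, const SCEV *X,
                              const Type *Ty) {
  // Constants fold together: (X + 1) + 2 canonicalizes to 3 + X.
  const SCEV *S1 = SE.getAddExpr(SE.getAddExpr(X, SE.getIntegerSCEV(1, Ty)),
                                 SE.getIntegerSCEV(2, Ty));
  // Duplicate operands merge into a multiply: X + X canonicalizes to 2 * X.
  const SCEV *S2 = SE.getAddExpr(X, X);
  (void)S1; (void)S2;
}
#endif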

/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        bool HasNUW, bool HasNSW) {
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVMulExpr operand types don't match!");
#endif

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (!isKnownNonNegative(Ops[i])) {
        All = false;
        break;
      }
    if (All) HasNUW = true;
  }

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                                           LHSC->getValue()->getValue() *
                                           RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2)
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
               I != E; ++I) {
            const SCEV *Mul = getMulExpr(Ops[0], *I);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps);
        }
    }
  }

  // Skip over the add expressions until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  if (Ops.size() == 1)
    return Ops[0];

  // If there are mul operands, inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have a mul, expand the mul operands onto the end of the operands
      // list.
      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any of
  // the other multiplied values are loop invariant.  If so, we can fold them
  // into the recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      if (LIOps.size() == 1) {
        const SCEV *Scale = LIOps[0];
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
      } else {
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
          MulOps.push_back(AddRec->getOperand(i));
          NewOps.push_back(getMulExpr(MulOps));
        }
      }

      // It's tempting to propagate the NSW flag here, but nsw multiplication
      // is not associative, so this isn't necessarily safe.
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
                                         HasNUW && AddRec->hasNoUnsignedWrap(),
                                         /*HasNSW=*/false);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // multiplied together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
          const SCEV *NewStart = getMulExpr(F->getStart(),
                                            G->getStart());
          const SCEV *B = F->getStepRecurrence(*this);
          const SCEV *D = G->getStepRecurrence(*this);
          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
                                           getMulExpr(G, B),
                                           getMulExpr(B, D));
          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
                                                F->getLoop());
          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getMulExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  SCEVMulExpr *S =
    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}
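
// Illustrative sketch (compiled out): the C1*(C2+V) distribution rule above.
// SE, V, and Ty are placeholder names for a ScalarEvolution instance, an
// arbitrary SCEV, and its integer type.
#if 0
static void exampleGetMulExpr(ScalarEvolution &SE, const SCEV *V,
                              const Type *Ty) {
  // 3 * (2 + V) canonicalizes to 6 + 3*V.
  const SCEV *Add = SE.getAddExpr(SE.getIntegerSCEV(2, Ty), V);
  const SCEV *Mul = SE.getMulExpr(SE.getIntegerSCEV(3, Ty), Add);
  (void)Mul;
}
#endif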

/// getUDivExpr - Get a canonical unsigned division expression, or something
/// simpler if possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->equalsInt(1))
      return LHS;                               // X udiv 1 --> X
    if (RHSC->isZero())
      return getIntegerSCEV(0, LHS->getType()); // value is undefined

    // Determine if the division can be folded into the operands of
    // the dividend.
    // TODO: Generalize this to non-constants by using known-bits information.
    const Type *Ty = LHS->getType();
    unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
    unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
    // For non-power-of-two values, effectively round the value up to the
    // nearest power of two.
    if (!RHSC->getValue()->getValue().isPowerOf2())
      ++MaxShiftAmt;
    const IntegerType *ExtTy =
      IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
    // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
      if (const SCEVConstant *Step =
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
        if (!Step->getValue()->getValue()
              .urem(RHSC->getValue()->getValue()) &&
            getZeroExtendExpr(AR, ExtTy) ==
            getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                          getZeroExtendExpr(Step, ExtTy),
                          AR->getLoop())) {
          SmallVector<const SCEV *, 4> Operands;
          for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
            Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
          return getAddRecExpr(Operands, AR->getLoop());
        }
    // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
      SmallVector<const SCEV *, 4> Operands;
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
        Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
      if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
        // Find an operand that's safely divisible.
        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
          const SCEV *Op = M->getOperand(i);
          const SCEV *Div = getUDivExpr(Op, RHSC);
          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
            Operands = SmallVector<const SCEV *, 4>(M->op_begin(), M->op_end());
            Operands[i] = Div;
            return getMulExpr(Operands);
          }
        }
    }
    // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
    if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
      SmallVector<const SCEV *, 4> Operands;
      for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
        Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
      if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
        Operands.clear();
        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
          const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
          if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
            break;
          Operands.push_back(Op);
        }
        if (Operands.size() == A->getNumOperands())
          return getAddExpr(Operands);
      }
    }

    // Fold if both operands are constant.
    if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
      Constant *LHSCV = LHSC->getValue();
      Constant *RHSCV = RHSC->getValue();
      return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                 RHSCV)));
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
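
// Illustrative sketch (compiled out): the {X,+,N}/C rule above. SE, L, and
// Ty are placeholder names for a ScalarEvolution instance, a loop, and an
// integer type; whether the fold actually fires depends on the zero-extend
// overflow check performed above.
#if 0
static void exampleGetUDivExpr(ScalarEvolution &SE, const Loop *L,
                               const Type *Ty) {
  // {0,+,4}/2 can fold to {0,+,2} when the step is divisible by the
  // divisor and the recurrence provably cannot overflow.
  const SCEV *AR = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty),
                                    SE.getIntegerSCEV(4, Ty), L);
  const SCEV *Div = SE.getUDivExpr(AR, SE.getIntegerSCEV(2, Ty));
  (void)Div;
}
#endif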

/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
                                           const SCEV *Step, const Loop *L,
                                           bool HasNUW, bool HasNSW) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.insert(Operands.end(), StepChrec->op_begin(),
                      StepChrec->op_end());
      return getAddRecExpr(Operands, L);
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, HasNUW, HasNSW);
}

/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L,
                               bool HasNUW, bool HasNSW) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
           getEffectiveSCEVType(Operands[0]->getType()) &&
           "SCEVAddRecExpr operand types don't match!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0}  -->  X
  }

  // It's tempting to want to call getMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (unsigned i = 0, e = Operands.size(); i != e; ++i)
      if (!isKnownNonNegative(Operands[i])) {
        All = false;
        break;
      }
    if (All) HasNUW = true;
  }

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop->getHeader()) ?
        (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
        (!NestedLoop->contains(L->getHeader()) &&
         DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = true;
      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
        if (!Operands[i]->isLoopInvariant(L)) {
          AllInvariant = false;
          break;
        }
      if (AllInvariant) {
        NestedOperands[0] = getAddRecExpr(Operands, L);
        AllInvariant = true;
        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
          if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
            AllInvariant = false;
            break;
          }
        if (AllInvariant)
          // Ok, both add recurrences are valid after the transformation.
          return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW);
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  ID.AddInteger(Operands.size());
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
  ID.AddPointer(L);
  void *IP = 0;
  SCEVAddRecExpr *S =
    static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
    S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
                                           O, Operands.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}
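
// Illustrative sketch (compiled out): building the canonical induction
// variable {0,+,1} for a loop. SE and L are placeholder names for a
// ScalarEvolution instance and an analyzable loop.
#if 0
static void exampleGetAddRecExpr(ScalarEvolution &SE, const Loop *L) {
  const Type *Ty = Type::getInt32Ty(SE.getContext());
  // {0,+,1}<L>: starts at 0 and advances by 1 on each iteration of L.
  const SCEV *IV = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty),
                                    SE.getIntegerSCEV(1, Ty), L);
  // A zero step collapses the recurrence: {IV,+,0} --> IV.
  const SCEV *Same = SE.getAddRecExpr(IV, SE.getIntegerSCEV(0, Ty), L);
  (void)Same;
}
#endif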

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getSMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                              APIntOps::smax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Find the first SMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, delete one.  Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X smax Y smax Y  -->  X smax Y
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
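
// Illustrative sketch (compiled out): constant folding and duplicate
// removal in getSMaxExpr. SE, X, and Ty are placeholder names.
#if 0
static void exampleGetSMaxExpr(ScalarEvolution &SE, const SCEV *X,
                               const Type *Ty) {
  // smax(2, 5) folds to the constant 5.
  const SCEV *C = SE.getSMaxExpr(SE.getIntegerSCEV(2, Ty),
                                 SE.getIntegerSCEV(5, Ty));
  // smax(X, X) folds to X via the duplicate-removal step.
  const SCEV *D = SE.getSMaxExpr(X, X);
  (void)C; (void)D;
}
#endif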

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getUMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                              APIntOps::umax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have an umax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Find the first UMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, delete one.  Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X umax Y umax Y  -->  X umax Y
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~smax(~x, ~y) == smin(x, y).
  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~umax(~x, ~y) == umin(x, y).
  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}
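
// Illustrative sketch (compiled out): the min-via-max identity used above.
// In two's complement ~x = -1 - x, so ~smax(~x, ~y) == smin(x, y), and
// likewise for the unsigned forms. SE, X, and Y are placeholder names.
#if 0
static void exampleGetMinExprs(ScalarEvolution &SE, const SCEV *X,
                               const SCEV *Y) {
  // Both are built by negating a max of negations.
  const SCEV *SMin = SE.getSMinExpr(X, Y);
  const SCEV *UMin = SE.getUMinExpr(X, Y);
  (void)SMin; (void)UMin;
}
#endif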

const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
  Constant *C = ConstantExpr::getSizeOf(AllocTy);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    C = ConstantFoldConstantExpression(CE, TD);
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
  Constant *C = ConstantExpr::getAlignOf(AllocTy);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    C = ConstantFoldConstantExpression(CE, TD);
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
                                             unsigned FieldNo) {
  Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    C = ConstantFoldConstantExpression(CE, TD);
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
                                             Constant *FieldNo) {
  Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    C = ConstantFoldConstantExpression(CE, TD);
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here.  createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// isSCEVable - Test if values of the given type are analyzable within
/// the SCEV framework. This primarily includes integer types, and it
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
bool ScalarEvolution::isSCEVable(const Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntegerTy() || Ty->isPointerTy();
}

/// getTypeSizeInBits - Return the size in bits of the specified type,
/// for which isSCEVable must return true.
uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  // If we have a TargetData, use it!
  if (TD)
    return TD->getTypeSizeInBits(Ty);

  // Integer types have fixed sizes.
  if (Ty->isIntegerTy())
    return Ty->getPrimitiveSizeInBits();

  // The only other supported type is pointer. Without TargetData,
  // conservatively assume pointers are 64-bit.
  assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
  return 64;
}

/// getEffectiveSCEVType - Return a type with the same bitwidth as
/// the given type and which represents how SCEV will treat the given
/// type, for which isSCEVable must return true. For pointer types,
/// this is the pointer-sized integer type.
const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  if (TD) return TD->getIntPtrType(getContext());

  // Without TargetData, conservatively assume pointers are 64-bit.
  return Type::getInt64Ty(getContext());
}
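
// Illustrative sketch (compiled out): how pointers acquire an integer type.
// SE is a placeholder for a ScalarEvolution instance; without TargetData,
// pointers are conservatively treated as 64 bits wide.
#if 0
static void exampleEffectiveType(ScalarEvolution &SE) {
  const Type *PtrTy =
    PointerType::getUnqual(Type::getInt8Ty(SE.getContext()));
  // Yields the pointer-sized integer type: TD->getIntPtrType(...) when
  // TargetData is available, i64 otherwise.
  const Type *EffTy = SE.getEffectiveSCEVType(PtrTy);
  (void)EffTy;
}
#endif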

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return &CouldNotCompute;
}

/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
/// expression and create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
  if (I != Scalars.end()) return I->second;
  const SCEV *S = createSCEV(V);
  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
  return S;
}

/// getIntegerSCEV - Given a SCEVable type, create a constant for the
/// specified signed integer value and return a SCEV for the constant.
const SCEV *ScalarEvolution::getIntegerSCEV(int64_t Val, const Type *Ty) {
  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, Val));
}

/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
///
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
               cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  const Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(V,
                  getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
}

/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
                cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  const Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
                   getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
///
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
                                          const SCEV *RHS) {
  // X - Y --> X + -Y
  return getAddExpr(LHS, getNegativeSCEV(RHS));
}

/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is zero
/// extended.
const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
                                         const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is sign
/// extended.
const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is zero
/// extended.  The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is sign
/// extended.  The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended,
/// it is extended with unspecified bits. The conversion must not be
/// narrowing.
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  The conversion must not be widening.
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}
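
// Illustrative sketch (compiled out): choosing among the conversion helpers
// above. SE and V are placeholder names for a ScalarEvolution instance and
// a SCEV of type i32.
#if 0
static void exampleConversions(ScalarEvolution &SE, const SCEV *V) {
  const Type *I64 = Type::getInt64Ty(SE.getContext());
  const Type *I16 = Type::getInt16Ty(SE.getContext());
  // Widening i32 -> i64: the zero-extending variant.
  const SCEV *Wide = SE.getTruncateOrZeroExtend(V, I64);
  // Narrowing i32 -> i16: both variants truncate.
  const SCEV *Narrow = SE.getTruncateOrSignExtend(V, I16);
  (void)Wide; (void)Narrow;
}
#endif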

/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

/// getUMinFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umin operation
/// with them.
const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMinExpr(PromotedLHS, PromotedRHS);
}
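
// Illustrative sketch (compiled out): mixing widths. SE, A32, and B64 are
// placeholder names for a ScalarEvolution instance and SCEVs of types i32
// and i64; the narrower operand is zero-extended before the umax is formed.
#if 0
static void exampleUMaxMismatched(ScalarEvolution &SE, const SCEV *A32,
                                  const SCEV *B64) {
  // The result has type i64: A32 is zero-extended to match B64.
  const SCEV *Max = SE.getUMaxFromMismatchedTypes(A32, B64);
  (void)Max;
}
#endif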

/// PushDefUseChildren - Push users of the given Instruction
/// onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
       UI != UE; ++UI)
    Worklist.push_back(cast<Instruction>(UI));
}
2553
2554/// ForgetSymbolicValue - This looks up computed SCEV values for all
2555/// instructions that depend on the given instruction and removes them from
2556/// the Scalars map if they reference SymName. This is used during PHI
2557/// resolution.
2558void
2559ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
2560  SmallVector<Instruction *, 16> Worklist;
2561  PushDefUseChildren(PN, Worklist);
2562
2563  SmallPtrSet<Instruction *, 8> Visited;
2564  Visited.insert(PN);
2565  while (!Worklist.empty()) {
2566    Instruction *I = Worklist.pop_back_val();
2567    if (!Visited.insert(I)) continue;
2568
2569    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
2570      Scalars.find(static_cast<Value *>(I));
2571    if (It != Scalars.end()) {
2572      // Short-circuit the def-use traversal if the symbolic name
2573      // ceases to appear in expressions.
2574      if (It->second != SymName && !It->second->hasOperand(SymName))
2575        continue;
2576
2577      // SCEVUnknown for a PHI either means that it has an unrecognized
2578      // structure, it's a PHI that's in the process of being computed
2579      // by createNodeForPHI, or it's a single-value PHI. In the first case,
2580      // additional loop trip count information isn't going to change anything.
2581      // In the second case, createNodeForPHI will perform the necessary
2582      // updates on its own when it gets to that point. In the third, we do
2583      // want to forget the SCEVUnknown.
2584      if (!isa<PHINode>(I) ||
2585          !isa<SCEVUnknown>(It->second) ||
2586          (I != PN && It->second == SymName)) {
2587        ValuesAtScopes.erase(It->second);
2588        Scalars.erase(It);
2589      }
2590    }
2591
2592    PushDefUseChildren(I, Worklist);
2593  }
2594}
2595
2596/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2597/// a loop header, making it a potential recurrence, or it doesn't.
2598///
2599const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2600  if (PN->getNumIncomingValues() == 2)  // The loops have been canonicalized.
2601    if (const Loop *L = LI->getLoopFor(PN->getParent()))
2602      if (L->getHeader() == PN->getParent()) {
2603        // If it lives in the loop header, it has two incoming values, one
2604        // from outside the loop, and one from inside.
2605        unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2606        unsigned BackEdge     = IncomingEdge^1;
2607
2608        // While we are analyzing this PHI node, handle its value symbolically.
2609        const SCEV *SymbolicName = getUnknown(PN);
2610        assert(Scalars.find(PN) == Scalars.end() &&
2611               "PHI node already processed?");
2612        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2613
2614        // Using this symbolic name for the PHI, analyze the value coming around
2615        // the back-edge.
2616        Value *BEValueV = PN->getIncomingValue(BackEdge);
2617        const SCEV *BEValue = getSCEV(BEValueV);
2618
2619        // NOTE: If BEValue is loop invariant, we know that the PHI node just
2620        // has a special value for the first iteration of the loop.
2621
2622        // If the value coming around the backedge is an add with the symbolic
2623        // value we just inserted, then we found a simple induction variable!
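        // For example (editor's illustration; the IR names are hypothetical):
        //   %i      = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
        //   %i.next = add i32 %i, 1
        // Here BEValue is (%i + 1); dropping the symbolic operand leaves the
        // accumulator 1, which yields the recurrence {0,+,1} for this loop.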
2624        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2625          // If there is a single occurrence of the symbolic value, replace it
2626          // with a recurrence.
2627          unsigned FoundIndex = Add->getNumOperands();
2628          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2629            if (Add->getOperand(i) == SymbolicName)
2630              if (FoundIndex == e) {
2631                FoundIndex = i;
2632                break;
2633              }
2634
2635          if (FoundIndex != Add->getNumOperands()) {
2636            // Create an add with everything but the specified operand.
2637            SmallVector<const SCEV *, 8> Ops;
2638            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2639              if (i != FoundIndex)
2640                Ops.push_back(Add->getOperand(i));
2641            const SCEV *Accum = getAddExpr(Ops);
2642
2643            // This is not a valid addrec if the step amount is varying each
2644            // loop iteration, but is not itself an addrec in this loop.
2645            if (Accum->isLoopInvariant(L) ||
2646                (isa<SCEVAddRecExpr>(Accum) &&
2647                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2648              bool HasNUW = false;
2649              bool HasNSW = false;
2650
2651              // If the increment doesn't overflow, then neither the addrec nor
2652              // the post-increment will overflow.
2653              if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
2654                if (OBO->hasNoUnsignedWrap())
2655                  HasNUW = true;
2656                if (OBO->hasNoSignedWrap())
2657                  HasNSW = true;
2658              }
2659
2660              const SCEV *StartVal =
2661                getSCEV(PN->getIncomingValue(IncomingEdge));
2662              const SCEV *PHISCEV =
2663                getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);
2664
2665              // Since the no-wrap flags are on the increment, they apply to the
2666              // post-incremented value as well.
2667              if (Accum->isLoopInvariant(L))
2668                (void)getAddRecExpr(getAddExpr(StartVal, Accum),
2669                                    Accum, L, HasNUW, HasNSW);
2670
2671              // Okay, for the entire analysis of this edge we assumed the PHI
2672              // to be symbolic.  We now need to go back and purge all of the
2673              // entries for the scalars that use the symbolic expression.
2674              ForgetSymbolicName(PN, SymbolicName);
2675              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2676              return PHISCEV;
2677            }
2678          }
2679        } else if (const SCEVAddRecExpr *AddRec =
2680                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
2681          // Otherwise, this could be a loop like this:
2682          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2683          // In this case, j = {1,+,1}  and BEValue is j.
2684          // Because the other in-value of i (0) fits the evolution of BEValue,
2685          // i really is an addrec evolution.
2686          if (AddRec->getLoop() == L && AddRec->isAffine()) {
2687            const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2688
2689            // If StartVal = j.start - j.stride, we can use StartVal as the
2690            // initial value of the addrec evolution.
2691            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2692                                         AddRec->getOperand(1))) {
2693              const SCEV *PHISCEV =
2694                 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2695
2696              // Okay, for the entire analysis of this edge we assumed the PHI
2697              // to be symbolic.  We now need to go back and purge all of the
2698              // entries for the scalars that use the symbolic expression.
2699              ForgetSymbolicName(PN, SymbolicName);
2700              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2701              return PHISCEV;
2702            }
2703          }
2704        }
2705
2706        return SymbolicName;
2707      }
2708
2709  // If the PHI has a single incoming value, follow that value, unless the
2710  // PHI's incoming blocks are in a different loop, in which case doing so
2711  // risks breaking LCSSA form. Instcombine would normally zap these, but
2712  // it doesn't have DominatorTree information, so it may miss cases.
2713  if (Value *V = PN->hasConstantValue(DT)) {
2714    bool AllSameLoop = true;
2715    Loop *PNLoop = LI->getLoopFor(PN->getParent());
2716    for (size_t i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
2717      if (LI->getLoopFor(PN->getIncomingBlock(i)) != PNLoop) {
2718        AllSameLoop = false;
2719        break;
2720      }
2721    if (AllSameLoop)
2722      return getSCEV(V);
2723  }
2724
2725  // If it's not a loop phi, we can't handle it yet.
2726  return getUnknown(PN);
2727}
2728
2729/// createNodeForGEP - Expand GEP instructions into add and multiply
2730/// operations. This allows them to be analyzed by regular SCEV code.
2731///
2732const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
2733
2734  bool InBounds = GEP->isInBounds();
2735  const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
2736  Value *Base = GEP->getOperand(0);
2737  // Don't attempt to analyze GEPs over unsized objects.
2738  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2739    return getUnknown(GEP);
2740  const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
2741  gep_type_iterator GTI = gep_type_begin(GEP);
2742  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2743                                      E = GEP->op_end();
2744       I != E; ++I) {
2745    Value *Index = *I;
2746    // Compute the (potentially symbolic) offset in bytes for this index.
2747    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2748      // For a struct, add the member offset.
2749      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2750      TotalOffset = getAddExpr(TotalOffset,
2751                               getOffsetOfExpr(STy, FieldNo),
2752                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
2753    } else {
2754      // For an array, add the element offset, explicitly scaled.
2755      const SCEV *LocalOffset = getSCEV(Index);
2756      // Getelementptr indices are signed.
2757      LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
2758      // Lower "inbounds" GEPs to NSW arithmetic.
2759      LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
2760                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
2761      TotalOffset = getAddExpr(TotalOffset, LocalOffset,
2762                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
2763    }
2764  }
2765  return getAddExpr(getSCEV(Base), TotalOffset,
2766                    /*HasNUW=*/false, /*HasNSW=*/InBounds);
2767}
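// A worked example of the decomposition above (editor's note; the IR names
// are hypothetical). On a target with 64-bit pointers,
//   %p = getelementptr inbounds [10 x i32]* %A, i64 0, i64 %i
// becomes the SCEV (%A + 4 * %i): the first index contributes zero, the i32
// element size is 4 bytes, and "inbounds" lets the scaled add carry the
// no-signed-wrap flag.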
2768
2769/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2770/// guaranteed to end in (at every loop iteration).  It is, at the same time,
2771/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2772/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
2773uint32_t
2774ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2775  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2776    return C->getValue()->getValue().countTrailingZeros();
2777
2778  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2779    return std::min(GetMinTrailingZeros(T->getOperand()),
2780                    (uint32_t)getTypeSizeInBits(T->getType()));
2781
2782  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2783    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2784    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2785             getTypeSizeInBits(E->getType()) : OpRes;
2786  }
2787
2788  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2789    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2790    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2791             getTypeSizeInBits(E->getType()) : OpRes;
2792  }
2793
2794  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2795    // The result is the min of all operands' results.
2796    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2797    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2798      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2799    return MinOpRes;
2800  }
2801
2802  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2803    // The result is the sum of all operands' results.
2804    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2805    uint32_t BitWidth = getTypeSizeInBits(M->getType());
2806    for (unsigned i = 1, e = M->getNumOperands();
2807         SumOpRes != BitWidth && i != e; ++i)
2808      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2809                          BitWidth);
2810    return SumOpRes;
2811  }
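  // For example (editor's note): in a product such as 12 * 8 the factors
  // have 2 and 3 trailing zero bits, so the product 96 (0b1100000) has at
  // least 2 + 3 == 5, with the sum capped at the bit width.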
2812
2813  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2814    // The result is the min of all operands' results.
2815    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2816    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2817      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2818    return MinOpRes;
2819  }
2820
2821  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2822    // The result is the min of all operands' results.
2823    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2824    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2825      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2826    return MinOpRes;
2827  }
2828
2829  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2830    // The result is the min of all operands' results.
2831    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2832    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2833      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2834    return MinOpRes;
2835  }
2836
2837  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2838    // For a SCEVUnknown, ask ValueTracking.
2839    unsigned BitWidth = getTypeSizeInBits(U->getType());
2840    APInt Mask = APInt::getAllOnesValue(BitWidth);
2841    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2842    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
2843    return Zeros.countTrailingOnes();
2844  }
2845
2846  // SCEVUDivExpr: nothing is known about the trailing zeros of a udiv.
2847  return 0;
2848}
2849
2850/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
2851///
2852ConstantRange
2853ScalarEvolution::getUnsignedRange(const SCEV *S) {
2854
2855  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2856    return ConstantRange(C->getValue()->getValue());
2857
2858  unsigned BitWidth = getTypeSizeInBits(S->getType());
2859  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
2860
2861  // If the value has known zeros, the maximum unsigned value will have those
2862  // known zeros as well.
2863  uint32_t TZ = GetMinTrailingZeros(S);
2864  if (TZ != 0)
2865    ConservativeResult =
2866      ConstantRange(APInt::getMinValue(BitWidth),
2867                    APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
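  // Worked example (editor's note): with BitWidth == 8 and TZ == 2, the
  // upper bound above is (0xFF >> 2) << 2 == 0xFC, giving the range
  // [0, 0xFD): a value whose low two bits are zero can be at most 0xFC.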
2868
2869  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2870    ConstantRange X = getUnsignedRange(Add->getOperand(0));
2871    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2872      X = X.add(getUnsignedRange(Add->getOperand(i)));
2873    return ConservativeResult.intersectWith(X);
2874  }
2875
2876  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2877    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
2878    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2879      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
2880    return ConservativeResult.intersectWith(X);
2881  }
2882
2883  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2884    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
2885    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2886      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
2887    return ConservativeResult.intersectWith(X);
2888  }
2889
2890  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2891    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
2892    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2893      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
2894    return ConservativeResult.intersectWith(X);
2895  }
2896
2897  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2898    ConstantRange X = getUnsignedRange(UDiv->getLHS());
2899    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
2900    return ConservativeResult.intersectWith(X.udiv(Y));
2901  }
2902
2903  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2904    ConstantRange X = getUnsignedRange(ZExt->getOperand());
2905    return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
2906  }
2907
2908  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2909    ConstantRange X = getUnsignedRange(SExt->getOperand());
2910    return ConservativeResult.intersectWith(X.signExtend(BitWidth));
2911  }
2912
2913  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2914    ConstantRange X = getUnsignedRange(Trunc->getOperand());
2915    return ConservativeResult.intersectWith(X.truncate(BitWidth));
2916  }
2917
2918  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2919    // If there's no unsigned wrap, the value will never be less than its
2920    // initial value.
2921    if (AddRec->hasNoUnsignedWrap())
2922      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
2923        if (!C->isZero())
2924          ConservativeResult =
2925            ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0));
2926
2927    // TODO: non-affine addrec
2928    if (AddRec->isAffine()) {
2929      const Type *Ty = AddRec->getType();
2930      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2931      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
2932          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
2933        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2934
2935        const SCEV *Start = AddRec->getStart();
2936        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2937
2938        // Check for overflow.
2939        if (!AddRec->hasNoUnsignedWrap())
2940          return ConservativeResult;
2941
2942        ConstantRange StartRange = getUnsignedRange(Start);
2943        ConstantRange EndRange = getUnsignedRange(End);
2944        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
2945                                   EndRange.getUnsignedMin());
2946        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
2947                                   EndRange.getUnsignedMax());
2948        if (Min.isMinValue() && Max.isMaxValue())
2949          return ConservativeResult;
2950        return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
2951      }
2952    }
2953
2954    return ConservativeResult;
2955  }
2956
2957  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2958    // For a SCEVUnknown, ask ValueTracking.
2959    APInt Mask = APInt::getAllOnesValue(BitWidth);
2960    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2961    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2962    if (Ones == ~Zeros + 1)
2963      return ConservativeResult;
2964    return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
2965  }
2966
2967  return ConservativeResult;
2968}
2969
2970/// getSignedRange - Determine the signed range for a particular SCEV.
2971///
2972ConstantRange
2973ScalarEvolution::getSignedRange(const SCEV *S) {
2974
2975  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2976    return ConstantRange(C->getValue()->getValue());
2977
2978  unsigned BitWidth = getTypeSizeInBits(S->getType());
2979  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
2980
2981  // If the value has known zeros, the maximum signed value will have those
2982  // known zeros as well.
2983  uint32_t TZ = GetMinTrailingZeros(S);
2984  if (TZ != 0)
2985    ConservativeResult =
2986      ConstantRange(APInt::getSignedMinValue(BitWidth),
2987                    APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
2988
2989  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2990    ConstantRange X = getSignedRange(Add->getOperand(0));
2991    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2992      X = X.add(getSignedRange(Add->getOperand(i)));
2993    return ConservativeResult.intersectWith(X);
2994  }
2995
2996  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2997    ConstantRange X = getSignedRange(Mul->getOperand(0));
2998    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2999      X = X.multiply(getSignedRange(Mul->getOperand(i)));
3000    return ConservativeResult.intersectWith(X);
3001  }
3002
3003  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3004    ConstantRange X = getSignedRange(SMax->getOperand(0));
3005    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3006      X = X.smax(getSignedRange(SMax->getOperand(i)));
3007    return ConservativeResult.intersectWith(X);
3008  }
3009
3010  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3011    ConstantRange X = getSignedRange(UMax->getOperand(0));
3012    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3013      X = X.umax(getSignedRange(UMax->getOperand(i)));
3014    return ConservativeResult.intersectWith(X);
3015  }
3016
3017  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3018    ConstantRange X = getSignedRange(UDiv->getLHS());
3019    ConstantRange Y = getSignedRange(UDiv->getRHS());
3020    return ConservativeResult.intersectWith(X.udiv(Y));
3021  }
3022
3023  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3024    ConstantRange X = getSignedRange(ZExt->getOperand());
3025    return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
3026  }
3027
3028  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3029    ConstantRange X = getSignedRange(SExt->getOperand());
3030    return ConservativeResult.intersectWith(X.signExtend(BitWidth));
3031  }
3032
3033  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3034    ConstantRange X = getSignedRange(Trunc->getOperand());
3035    return ConservativeResult.intersectWith(X.truncate(BitWidth));
3036  }
3037
3038  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3039    // If there's no signed wrap, and all the operands have the same sign or
3040    // zero, the value won't ever change sign.
3041    if (AddRec->hasNoSignedWrap()) {
3042      bool AllNonNeg = true;
3043      bool AllNonPos = true;
3044      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
3045        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
3046        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
3047      }
3048      if (AllNonNeg)
3049        ConservativeResult = ConservativeResult.intersectWith(
3050          ConstantRange(APInt(BitWidth, 0),
3051                        APInt::getSignedMinValue(BitWidth)));
3052      else if (AllNonPos)
3053        ConservativeResult = ConservativeResult.intersectWith(
3054          ConstantRange(APInt::getSignedMinValue(BitWidth),
3055                        APInt(BitWidth, 1)));
3056    }
3057
3058    // TODO: non-affine addrec
3059    if (AddRec->isAffine()) {
3060      const Type *Ty = AddRec->getType();
3061      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3062      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3063          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3064        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3065
3066        const SCEV *Start = AddRec->getStart();
3067        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
3068
3069        // Check for overflow.
3070        if (!AddRec->hasNoSignedWrap())
3071          return ConservativeResult;
3072
3073        ConstantRange StartRange = getSignedRange(Start);
3074        ConstantRange EndRange = getSignedRange(End);
3075        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
3076                                   EndRange.getSignedMin());
3077        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
3078                                   EndRange.getSignedMax());
3079        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
3080          return ConservativeResult;
3081        return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
3082      }
3083    }
3084
3085    return ConservativeResult;
3086  }
3087
3088  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3089    // For a SCEVUnknown, ask ValueTracking.
3090    if (!U->getValue()->getType()->isIntegerTy() && !TD)
3091      return ConservativeResult;
3092    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
3093    if (NS == 1)
3094      return ConservativeResult;
3095    return ConservativeResult.intersectWith(
3096      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
3097                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1));
3098  }
3099
3100  return ConservativeResult;
3101}
3102
3103/// createSCEV - We know that there is no SCEV for the specified value.
3104/// Analyze the expression.
3105///
3106const SCEV *ScalarEvolution::createSCEV(Value *V) {
3107  if (!isSCEVable(V->getType()))
3108    return getUnknown(V);
3109
3110  unsigned Opcode = Instruction::UserOp1;
3111  if (Instruction *I = dyn_cast<Instruction>(V)) {
3112    Opcode = I->getOpcode();
3113
3114    // Don't attempt to analyze instructions in blocks that aren't
3115    // reachable. Such instructions don't matter, and they aren't required
3116    // to obey basic rules for definitions dominating uses which this
3117    // analysis depends on.
3118    if (!DT->isReachableFromEntry(I->getParent()))
3119      return getUnknown(V);
3120  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
3121    Opcode = CE->getOpcode();
3122  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
3123    return getConstant(CI);
3124  else if (isa<ConstantPointerNull>(V))
3125    return getIntegerSCEV(0, V->getType());
3126  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
3127    return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
3128  else
3129    return getUnknown(V);
3130
3131  Operator *U = cast<Operator>(V);
3132  switch (Opcode) {
3133  case Instruction::Add:
3134    // Don't transfer the NSW and NUW bits from the Add instruction to the
3135    // Add expression, because the Instruction may be guarded by control
3136    // flow and the no-overflow bits may not be valid for the expression in
3137    // any context.
3138    return getAddExpr(getSCEV(U->getOperand(0)),
3139                      getSCEV(U->getOperand(1)));
3140  case Instruction::Mul:
3141    // Don't transfer the NSW and NUW bits from the Mul instruction to the
3142    // Mul expression, as with Add.
3143    return getMulExpr(getSCEV(U->getOperand(0)),
3144                      getSCEV(U->getOperand(1)));
3145  case Instruction::UDiv:
3146    return getUDivExpr(getSCEV(U->getOperand(0)),
3147                       getSCEV(U->getOperand(1)));
3148  case Instruction::Sub:
3149    return getMinusSCEV(getSCEV(U->getOperand(0)),
3150                        getSCEV(U->getOperand(1)));
3151  case Instruction::And:
3152    // For an expression like x&255 that merely masks off the high bits,
3153    // use zext(trunc(x)) as the SCEV expression.
3154    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3155      if (CI->isNullValue())
3156        return getSCEV(U->getOperand(1));
3157      if (CI->isAllOnesValue())
3158        return getSCEV(U->getOperand(0));
3159      const APInt &A = CI->getValue();
3160
3161      // Instcombine's ShrinkDemandedConstant may strip bits out of
3162      // constants, obscuring what would otherwise be a low-bits mask.
3163      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
3164      // knew about to reconstruct a low-bits mask value.
3165      unsigned LZ = A.countLeadingZeros();
3166      unsigned BitWidth = A.getBitWidth();
3167      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
3168      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3169      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
3170
3171      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
3172
3173      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
3174        return
3175          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
3176                                IntegerType::get(getContext(), BitWidth - LZ)),
3177                            U->getType());
3178    }
3179    break;
3180
3181  case Instruction::Or:
3182    // If the RHS of the Or is a constant, we may have something like:
3183    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
3184    // optimizations will transparently handle this case.
3185    //
3186    // In order for this transformation to be safe, the LHS must be of the
3187    // form X*(2^n) and the Or constant must be less than 2^n.
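    // Worked example (editor's note): for (X*4)|1 the LHS has at least two
    // trailing zero bits while the constant 1 occupies only one bit, so no
    // bits overlap and (X*4)|1 is exactly (X*4)+1.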
3188    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3189      const SCEV *LHS = getSCEV(U->getOperand(0));
3190      const APInt &CIVal = CI->getValue();
3191      if (GetMinTrailingZeros(LHS) >=
3192          (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
3193        // Build a plain add SCEV.
3194        const SCEV *S = getAddExpr(LHS, getSCEV(CI));
3195        // If the LHS of the add was an addrec and it has no-wrap flags,
3196        // transfer the no-wrap flags, since an or won't introduce a wrap.
3197        if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
3198          const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
3199          if (OldAR->hasNoUnsignedWrap())
3200            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true);
3201          if (OldAR->hasNoSignedWrap())
3202            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true);
3203        }
3204        return S;
3205      }
3206    }
3207    break;
3208  case Instruction::Xor:
3209    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3210      // If the RHS of the xor is a signbit, then this is just an add.
3211      // Instcombine turns add of signbit into xor as a strength reduction step.
3212      if (CI->getValue().isSignBit())
3213        return getAddExpr(getSCEV(U->getOperand(0)),
3214                          getSCEV(U->getOperand(1)));
3215
3216      // If the RHS of xor is -1, then this is a not operation.
3217      if (CI->isAllOnesValue())
3218        return getNotSCEV(getSCEV(U->getOperand(0)));
3219
3220      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
3221      // This is a variant of the check for xor with -1, and it handles
3222      // the case where instcombine has trimmed non-demanded bits out
3223      // of an xor with -1.
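      // For example (editor's note): if operand 0 is (x & 0xff), its SCEV is
      // a zext of an i8 value, and xor'ing with C == 0xff flips exactly the
      // low eight bits, which is zext(~trunc(x)); the code below recognizes
      // this shape.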
3224      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
3225        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
3226          if (BO->getOpcode() == Instruction::And &&
3227              LCI->getValue() == CI->getValue())
3228            if (const SCEVZeroExtendExpr *Z =
3229                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
3230              const Type *UTy = U->getType();
3231              const SCEV *Z0 = Z->getOperand();
3232              const Type *Z0Ty = Z0->getType();
3233              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
3234
3235              // If C is a low-bits mask, the zero extend is serving to
3236              // mask off the high bits. Complement the operand and
3237              // re-apply the zext.
3238              if (APIntOps::isMask(Z0TySize, CI->getValue()))
3239                return getZeroExtendExpr(getNotSCEV(Z0), UTy);
3240
3241              // If C is a single bit, it may be in the sign-bit position
3242              // before the zero-extend. In this case, represent the xor
3243              // using an add, which is equivalent, and re-apply the zext.
3244              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
3245              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3246                  Trunc.isSignBit())
3247                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3248                                         UTy);
3249            }
3250    }
3251    break;
3252
3253  case Instruction::Shl:
3254    // Turn shift left of a constant amount into a multiply.
3255    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3256      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3257      Constant *X = ConstantInt::get(getContext(),
3258        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
3259      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3260    }
3261    break;
3262
3263  case Instruction::LShr:
3264    // Turn logical shift right of a constant amount into an unsigned divide.
3265    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3266      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3267      Constant *X = ConstantInt::get(getContext(),
3268        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
3269      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3270    }
3271    break;
3272
3273  case Instruction::AShr:
3274    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
3275    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3276      if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
3277        if (L->getOpcode() == Instruction::Shl &&
3278            L->getOperand(1) == U->getOperand(1)) {
3279          unsigned BitWidth = getTypeSizeInBits(U->getType());
3280          uint64_t Amt = BitWidth - CI->getZExtValue();
3281          if (Amt == BitWidth)
3282            return getSCEV(L->getOperand(0));       // shift by zero --> noop
3283          if (Amt > BitWidth)
3284            return getIntegerSCEV(0, U->getType()); // value is undefined
3285          return
3286            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3287                                           IntegerType::get(getContext(), Amt)),
3288                                 U->getType());
3289        }
3290    break;
3291
3292  case Instruction::Trunc:
3293    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
3294
3295  case Instruction::ZExt:
3296    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3297
3298  case Instruction::SExt:
3299    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3300
3301  case Instruction::BitCast:
3302    // BitCasts are no-op casts so we just eliminate the cast.
3303    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
3304      return getSCEV(U->getOperand(0));
3305    break;
3306
3307  // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
3308  // lead to pointer expressions which cannot safely be expanded to GEPs,
3309  // because ScalarEvolution doesn't respect the GEP aliasing rules when
3310  // simplifying integer expressions.
3311
3312  case Instruction::GetElementPtr:
3313    return createNodeForGEP(cast<GEPOperator>(U));
3314
3315  case Instruction::PHI:
3316    return createNodeForPHI(cast<PHINode>(U));
3317
3318  case Instruction::Select:
3319    // This could be a smax or umax that was lowered earlier.
3320    // Try to recover it.
3321    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
3322      Value *LHS = ICI->getOperand(0);
3323      Value *RHS = ICI->getOperand(1);
3324      switch (ICI->getPredicate()) {
3325      case ICmpInst::ICMP_SLT:
3326      case ICmpInst::ICMP_SLE:
3327        std::swap(LHS, RHS);
3328        // fall through
3329      case ICmpInst::ICMP_SGT:
3330      case ICmpInst::ICMP_SGE:
3331        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
3332          return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
3333        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
3334          return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
3335        break;
3336      case ICmpInst::ICMP_ULT:
3337      case ICmpInst::ICMP_ULE:
3338        std::swap(LHS, RHS);
3339        // fall through
3340      case ICmpInst::ICMP_UGT:
3341      case ICmpInst::ICMP_UGE:
3342        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
3343          return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
3344        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
3345          return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
3346        break;
3347      case ICmpInst::ICMP_NE:
3348        // n != 0 ? n : 1  ->  umax(n, 1)
3349        if (LHS == U->getOperand(1) &&
3350            isa<ConstantInt>(U->getOperand(2)) &&
3351            cast<ConstantInt>(U->getOperand(2))->isOne() &&
3352            isa<ConstantInt>(RHS) &&
3353            cast<ConstantInt>(RHS)->isZero())
3354          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
3355        break;
3356      case ICmpInst::ICMP_EQ:
3357        // n == 0 ? 1 : n  ->  umax(n, 1)
3358        if (LHS == U->getOperand(2) &&
3359            isa<ConstantInt>(U->getOperand(1)) &&
3360            cast<ConstantInt>(U->getOperand(1))->isOne() &&
3361            isa<ConstantInt>(RHS) &&
3362            cast<ConstantInt>(RHS)->isZero())
3363          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
3364        break;
3365      default:
3366        break;
3367      }
3368    }
    break;
3369
3370  default: // We cannot analyze this expression.
3371    break;
3372  }
3373
3374  return getUnknown(V);
3375}
3376
3377
3378
3379//===----------------------------------------------------------------------===//
3380//                   Iteration Count Computation Code
3381//
3382
3383/// getBackedgeTakenCount - If the specified loop has a predictable
3384/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3385/// object. The backedge-taken count is the number of times the loop header
3386/// will be branched to from within the loop. This is one less than the
3387/// trip count of the loop, since it doesn't count the first iteration,
3388/// when the header is branched to from outside the loop.
3389///
3390/// Note that it is not valid to call this method on a loop without a
3391/// loop-invariant backedge-taken count (see
3392/// hasLoopInvariantBackedgeTakenCount).
3393///
3394const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3395  return getBackedgeTakenInfo(L).Exact;
3396}
3397
3398/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3399/// return the least SCEV value that is known never to be less than the
3400/// actual backedge taken count.
3401const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3402  return getBackedgeTakenInfo(L).Max;
3403}
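// Editor's usage sketch (hypothetical client code): a transformation pass
// that wants a trip count would query these entry points roughly as follows.
#if 0
  const SCEV *BEC = SE.getBackedgeTakenCount(L);
  if (!isa<SCEVCouldNotCompute>(BEC)) {
    // A loop whose body runs N times has a backedge-taken count of N-1.
  } else {
    // Even without an exact count, an upper bound may still be available.
    const SCEV *MaxBEC = SE.getMaxBackedgeTakenCount(L);
  }
#endif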
3404
3405/// PushLoopPHIs - Push PHI nodes in the header of the given loop
3406/// onto the given Worklist.
3407static void
3408PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3409  BasicBlock *Header = L->getHeader();
3410
3411  // Push all Loop-header PHIs onto the Worklist stack.
3412  for (BasicBlock::iterator I = Header->begin();
3413       PHINode *PN = dyn_cast<PHINode>(I); ++I)
3414    Worklist.push_back(PN);
3415}
3416
3417const ScalarEvolution::BackedgeTakenInfo &
3418ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3419  // Initially insert a CouldNotCompute for this loop. If the insertion
3420  // succeeds, proceed to actually compute a backedge-taken count and
3421  // update the value. The temporary CouldNotCompute value tells SCEV
3422  // code elsewhere that it shouldn't attempt to request a new
3423  // backedge-taken count, which could result in infinite recursion.
3424  std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
3425    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3426  if (Pair.second) {
3427    BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
3428    if (BECount.Exact != getCouldNotCompute()) {
3429      assert(BECount.Exact->isLoopInvariant(L) &&
3430             BECount.Max->isLoopInvariant(L) &&
3431             "Computed backedge-taken count isn't loop invariant for loop!");
3432      ++NumTripCountsComputed;
3433
3434      // Update the value in the map.
3435      Pair.first->second = BECount;
3436    } else {
3437      if (BECount.Max != getCouldNotCompute())
3438        // Update the value in the map.
3439        Pair.first->second = BECount;
3440      if (isa<PHINode>(L->getHeader()->begin()))
3441        // Only count loops that have phi nodes as not being computable.
3442        ++NumTripCountsNotComputed;
3443    }
3444
3445    // Now that we know more about the trip count for this loop, forget any
3446    // existing SCEV values for PHI nodes in this loop since they are only
3447    // conservative estimates made without the benefit of trip count
3448    // information. This is similar to the code in forgetLoop, except that
3449    // it handles SCEVUnknown PHI nodes specially.
3450    if (BECount.hasAnyInfo()) {
3451      SmallVector<Instruction *, 16> Worklist;
3452      PushLoopPHIs(L, Worklist);
3453
3454      SmallPtrSet<Instruction *, 8> Visited;
3455      while (!Worklist.empty()) {
3456        Instruction *I = Worklist.pop_back_val();
3457        if (!Visited.insert(I)) continue;
3458
3459        std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3460          Scalars.find(static_cast<Value *>(I));
3461        if (It != Scalars.end()) {
3462          // SCEVUnknown for a PHI either means that it has an unrecognized
3463          // structure, or it's a PHI that's in the process of being computed
3464          // by createNodeForPHI.  In the former case, additional loop trip
3465          // count information isn't going to change anything. In the latter
3466          // case, createNodeForPHI will perform the necessary updates on its
3467          // own when it gets to that point.
3468          if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
3469            ValuesAtScopes.erase(It->second);
3470            Scalars.erase(It);
3471          }
3472          if (PHINode *PN = dyn_cast<PHINode>(I))
3473            ConstantEvolutionLoopExitValue.erase(PN);
3474        }
3475
3476        PushDefUseChildren(I, Worklist);
3477      }
3478    }
3479  }
3480  return Pair.first->second;
3481}
3482
3483/// forgetLoop - This method should be called by the client when it has
3484/// changed a loop in a way that may affect ScalarEvolution's ability to
3485/// compute a trip count, or if the loop is deleted.
3486void ScalarEvolution::forgetLoop(const Loop *L) {
3487  // Drop any stored trip count value.
3488  BackedgeTakenCounts.erase(L);
3489
3490  // Drop information about expressions based on loop-header PHIs.
3491  SmallVector<Instruction *, 16> Worklist;
3492  PushLoopPHIs(L, Worklist);
3493
3494  SmallPtrSet<Instruction *, 8> Visited;
3495  while (!Worklist.empty()) {
3496    Instruction *I = Worklist.pop_back_val();
3497    if (!Visited.insert(I)) continue;
3498
3499    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3500      Scalars.find(static_cast<Value *>(I));
3501    if (It != Scalars.end()) {
3502      ValuesAtScopes.erase(It->second);
3503      Scalars.erase(It);
3504      if (PHINode *PN = dyn_cast<PHINode>(I))
3505        ConstantEvolutionLoopExitValue.erase(PN);
3506    }
3507
3508    PushDefUseChildren(I, Worklist);
3509  }
3510}
3511
3512/// forgetValue - This method should be called by the client when it has
3513/// changed a value in a way that may affect its value, or which may
3514/// disconnect it from a def-use chain linking it to a loop.
3515void ScalarEvolution::forgetValue(Value *V) {
3516  Instruction *I = dyn_cast<Instruction>(V);
3517  if (!I) return;
3518
3519  // Drop information about expressions based on loop-header PHIs.
3520  SmallVector<Instruction *, 16> Worklist;
3521  Worklist.push_back(I);
3522
3523  SmallPtrSet<Instruction *, 8> Visited;
3524  while (!Worklist.empty()) {
3525    I = Worklist.pop_back_val();
3526    if (!Visited.insert(I)) continue;
3527
3528    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3529      Scalars.find(static_cast<Value *>(I));
3530    if (It != Scalars.end()) {
3531      ValuesAtScopes.erase(It->second);
3532      Scalars.erase(It);
3533      if (PHINode *PN = dyn_cast<PHINode>(I))
3534        ConstantEvolutionLoopExitValue.erase(PN);
3535    }
3536
3537    PushDefUseChildren(I, Worklist);
3538  }
3539}
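// Editor's usage sketch (hypothetical): a pass that restructures a loop or
// replaces a value must invalidate the cached SCEVs before querying again.
#if 0
  SE.forgetLoop(L);      // e.g. after unrolling or deleting blocks of L
  SE.forgetValue(Inst);  // e.g. after replacing all uses of Inst
#endif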
3540
3541/// ComputeBackedgeTakenCount - Compute the number of times the backedge
3542/// of the specified loop will execute.
3543ScalarEvolution::BackedgeTakenInfo
3544ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3545  SmallVector<BasicBlock *, 8> ExitingBlocks;
3546  L->getExitingBlocks(ExitingBlocks);
3547
3548  // Examine all exits and pick the most conservative values.
3549  const SCEV *BECount = getCouldNotCompute();
3550  const SCEV *MaxBECount = getCouldNotCompute();
3551  bool CouldNotComputeBECount = false;
3552  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3553    BackedgeTakenInfo NewBTI =
3554      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3555
3556    if (NewBTI.Exact == getCouldNotCompute()) {
3557      // We couldn't compute an exact value for this exit, so
3558      // we won't be able to compute an exact value for the loop.
3559      CouldNotComputeBECount = true;
3560      BECount = getCouldNotCompute();
3561    } else if (!CouldNotComputeBECount) {
3562      if (BECount == getCouldNotCompute())
3563        BECount = NewBTI.Exact;
3564      else
3565        BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3566    }
3567    if (MaxBECount == getCouldNotCompute())
3568      MaxBECount = NewBTI.Max;
3569    else if (NewBTI.Max != getCouldNotCompute())
3570      MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3571  }
3572
3573  return BackedgeTakenInfo(BECount, MaxBECount);
3574}
3575
3576/// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3577/// of the specified loop will execute if it exits via the specified block.
3578ScalarEvolution::BackedgeTakenInfo
3579ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3580                                                   BasicBlock *ExitingBlock) {
3581
3582  // Okay, we've chosen an exiting block.  See what condition causes us to
3583  // exit at this block.
3584  //
3585  // FIXME: we should be able to handle switch instructions (with a single exit)
3586  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3587  if (ExitBr == 0) return getCouldNotCompute();
3588  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3589
3590  // At this point, we know we have a conditional branch that determines whether
3591  // the loop is exited.  However, we don't know if the branch is executed each
3592  // time through the loop.  If not, then the execution count of the branch will
3593  // not be equal to the trip count of the loop.
3594  //
3595  // Currently we check for this by checking to see if the Exit branch goes to
3596  // the loop header.  If so, we know it will always execute the same number of
3597  // times as the loop.  We also handle the case where the exit block *is* the
3598  // loop header.  This is common for un-rotated loops.
3599  //
3600  // If both of those tests fail, walk up the unique predecessor chain to the
3601  // header, stopping if there is an edge that doesn't exit the loop. If the
3602  // header is reached, the execution count of the branch will be equal to the
3603  // trip count of the loop.
3604  //
3605  //  More extensive analysis could be done to handle more cases here.
3606  //
3607  if (ExitBr->getSuccessor(0) != L->getHeader() &&
3608      ExitBr->getSuccessor(1) != L->getHeader() &&
3609      ExitBr->getParent() != L->getHeader()) {
3610    // The simple checks failed; try climbing the unique predecessor chain
3611    // up to the header.
3612    bool Ok = false;
3613    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3614      BasicBlock *Pred = BB->getUniquePredecessor();
3615      if (!Pred)
3616        return getCouldNotCompute();
3617      TerminatorInst *PredTerm = Pred->getTerminator();
3618      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3619        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3620        if (PredSucc == BB)
3621          continue;
3622        // If the predecessor has a successor that isn't BB and isn't
3623        // outside the loop, assume the worst.
3624        if (L->contains(PredSucc))
3625          return getCouldNotCompute();
3626      }
3627      if (Pred == L->getHeader()) {
3628        Ok = true;
3629        break;
3630      }
3631      BB = Pred;
3632    }
3633    if (!Ok)
3634      return getCouldNotCompute();
3635  }
3636
3637  // Proceed to the next level to examine the exit condition expression.
3638  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3639                                               ExitBr->getSuccessor(0),
3640                                               ExitBr->getSuccessor(1));
3641}
3642
3643/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3644/// backedge of the specified loop will execute if its exit condition
3645/// were a conditional branch of ExitCond, TBB, and FBB.
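///
/// For example (editor's note): given an exit test of the form (A && B) or
/// (A || B), each operand is analyzed separately; the subcounts combine with
/// umin when either operand alone can end the loop, and with umax when both
/// operands must trigger before the loop can exit.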
3646ScalarEvolution::BackedgeTakenInfo
3647ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3648                                                       Value *ExitCond,
3649                                                       BasicBlock *TBB,
3650                                                       BasicBlock *FBB) {
3651  // Check if the controlling expression for this loop is an And or Or.
3652  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3653    if (BO->getOpcode() == Instruction::And) {
3654      // Recurse on the operands of the and.
3655      BackedgeTakenInfo BTI0 =
3656        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3657      BackedgeTakenInfo BTI1 =
3658        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3659      const SCEV *BECount = getCouldNotCompute();
3660      const SCEV *MaxBECount = getCouldNotCompute();
3661      if (L->contains(TBB)) {
3662        // Both conditions must be true for the loop to continue executing.
3663        // Choose the less conservative count.
3664        if (BTI0.Exact == getCouldNotCompute() ||
3665            BTI1.Exact == getCouldNotCompute())
3666          BECount = getCouldNotCompute();
3667        else
3668          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3669        if (BTI0.Max == getCouldNotCompute())
3670          MaxBECount = BTI1.Max;
3671        else if (BTI1.Max == getCouldNotCompute())
3672          MaxBECount = BTI0.Max;
3673        else
3674          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3675      } else {
3676        // Both conditions must be true for the loop to exit.
3677        assert(L->contains(FBB) && "Loop block has no successor in loop!");
3678        if (BTI0.Exact != getCouldNotCompute() &&
3679            BTI1.Exact != getCouldNotCompute())
3680          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3681        if (BTI0.Max != getCouldNotCompute() &&
3682            BTI1.Max != getCouldNotCompute())
3683          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3684      }
3685
3686      return BackedgeTakenInfo(BECount, MaxBECount);
3687    }
3688    if (BO->getOpcode() == Instruction::Or) {
3689      // Recurse on the operands of the or.
3690      BackedgeTakenInfo BTI0 =
3691        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3692      BackedgeTakenInfo BTI1 =
3693        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3694      const SCEV *BECount = getCouldNotCompute();
3695      const SCEV *MaxBECount = getCouldNotCompute();
3696      if (L->contains(FBB)) {
3697        // Both conditions must be false for the loop to continue executing.
3698        // Choose the less conservative count.
3699        if (BTI0.Exact == getCouldNotCompute() ||
3700            BTI1.Exact == getCouldNotCompute())
3701          BECount = getCouldNotCompute();
3702        else
3703          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3704        if (BTI0.Max == getCouldNotCompute())
3705          MaxBECount = BTI1.Max;
3706        else if (BTI1.Max == getCouldNotCompute())
3707          MaxBECount = BTI0.Max;
3708        else
3709          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3710      } else {
3711        // Both conditions must be false for the loop to exit.
3712        assert(L->contains(TBB) && "Loop block has no successor in loop!");
3713        if (BTI0.Exact != getCouldNotCompute() &&
3714            BTI1.Exact != getCouldNotCompute())
3715          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3716        if (BTI0.Max != getCouldNotCompute() &&
3717            BTI1.Max != getCouldNotCompute())
3718          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3719      }
3720
3721      return BackedgeTakenInfo(BECount, MaxBECount);
3722    }
3723  }
3724
3725  // With an icmp, it may be feasible to compute an exact backedge-taken count.
3726  // Proceed to the next level to examine the icmp.
3727  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3728    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3729
3730  // Check for a constant condition. These are normally stripped out by
3731  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
3732  // preserve the CFG and is temporarily leaving constant conditions
3733  // in place.
3734  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
3735    if (L->contains(FBB) == !CI->getZExtValue())
3736      // The backedge is always taken.
3737      return getCouldNotCompute();
3738    else
3739      // The backedge is never taken.
3740      return getIntegerSCEV(0, CI->getType());
3741  }
3742
3743  // If it's not an integer or pointer comparison then compute it the hard way.
3744  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3745}
3746
3747/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3748/// backedge of the specified loop will execute if its exit condition
3749/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
3750ScalarEvolution::BackedgeTakenInfo
3751ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3752                                                           ICmpInst *ExitCond,
3753                                                           BasicBlock *TBB,
3754                                                           BasicBlock *FBB) {
3755
3756  // If the condition was exit on true, convert the condition to exit on false.
3757  ICmpInst::Predicate Cond;
3758  if (!L->contains(FBB))
3759    Cond = ExitCond->getPredicate();
3760  else
3761    Cond = ExitCond->getInversePredicate();
3762
3763  // Handle common loops like: for (X = "string"; *X; ++X)
3764  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3765    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3766      BackedgeTakenInfo ItCnt =
3767        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3768      if (ItCnt.hasAnyInfo())
3769        return ItCnt;
3770    }
3771
3772  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3773  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3774
3775  // Try to evaluate any dependencies out of the loop.
3776  LHS = getSCEVAtScope(LHS, L);
3777  RHS = getSCEVAtScope(RHS, L);
3778
3779  // At this point, we would like to compute how many iterations of the
3780  // loop the predicate will return true for these inputs.
3781  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3782    // If there is a loop-invariant, force it into the RHS.
3783    std::swap(LHS, RHS);
3784    Cond = ICmpInst::getSwappedPredicate(Cond);
3785  }
3786
3787  // If we have a comparison of a chrec against a constant, try to use value
3788  // ranges to answer this query.
3789  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3790    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3791      if (AddRec->getLoop() == L) {
3792        // Form the constant range.
3793        ConstantRange CompRange(
3794            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3795
3796        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3797        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3798      }
3799
3800  switch (Cond) {
3801  case ICmpInst::ICMP_NE: {                     // while (X != Y)
3802    // Convert to: while (X-Y != 0)
3803    BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3804    if (BTI.hasAnyInfo()) return BTI;
3805    break;
3806  }
3807  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
3808    // Convert to: while (X-Y == 0)
3809    BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3810    if (BTI.hasAnyInfo()) return BTI;
3811    break;
3812  }
3813  case ICmpInst::ICMP_SLT: {
3814    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3815    if (BTI.hasAnyInfo()) return BTI;
3816    break;
3817  }
3818  case ICmpInst::ICMP_SGT: {
3819    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3820                                             getNotSCEV(RHS), L, true);
3821    if (BTI.hasAnyInfo()) return BTI;
3822    break;
3823  }
3824  case ICmpInst::ICMP_ULT: {
3825    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3826    if (BTI.hasAnyInfo()) return BTI;
3827    break;
3828  }
3829  case ICmpInst::ICMP_UGT: {
3830    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3831                                             getNotSCEV(RHS), L, false);
3832    if (BTI.hasAnyInfo()) return BTI;
3833    break;
3834  }
3835  default:
3836#if 0
3837    dbgs() << "ComputeBackedgeTakenCount ";
3838    if (ExitCond->getOperand(0)->getType()->isUnsigned())
3839      dbgs() << "[unsigned] ";
3840    dbgs() << *LHS << "   "
3841         << Instruction::getOpcodeName(Instruction::ICmp)
3842         << "   " << *RHS << "\n";
3843#endif
3844    break;
3845  }
3846  return
3847    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3848}
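// Illustrative sketch (not part of the analysis): the SGT and UGT cases
// above reuse HowManyLessThans via the identity x > y  <=>  ~x < ~y, which
// holds because ~t == -1 - t reverses both the signed and the unsigned
// orderings. A hypothetical self-check for the signed case:
#if 0
static bool NotTrickHolds(int X, int Y) {
  int NotX = ~X, NotY = ~Y;         // ~t == -1 - t in two's complement
  return (X > Y) == (NotX < NotY);  // expected to hold for all X and Y
}
#endif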
3849
3850static ConstantInt *
3851EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3852                                ScalarEvolution &SE) {
3853  const SCEV *InVal = SE.getConstant(C);
3854  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
3855  assert(isa<SCEVConstant>(Val) &&
3856         "Evaluation of SCEV at constant didn't fold correctly?");
3857  return cast<SCEVConstant>(Val)->getValue();
3858}
3859
3860/// GetAddressedElementFromGlobal - Given a global variable with an initializer
3861/// and a GEP expression (missing the pointer index) indexing into it, return
3862/// the addressed element of the initializer or null if the index expression is
3863/// invalid.
3864static Constant *
3865GetAddressedElementFromGlobal(GlobalVariable *GV,
3866                              const std::vector<ConstantInt*> &Indices) {
3867  Constant *Init = GV->getInitializer();
3868  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3869    uint64_t Idx = Indices[i]->getZExtValue();
3870    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3871      assert(Idx < CS->getNumOperands() && "Bad struct index!");
3872      Init = cast<Constant>(CS->getOperand(Idx));
3873    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3874      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
3875      Init = cast<Constant>(CA->getOperand(Idx));
3876    } else if (isa<ConstantAggregateZero>(Init)) {
3877      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3878        assert(Idx < STy->getNumElements() && "Bad struct index!");
3879        Init = Constant::getNullValue(STy->getElementType(Idx));
3880      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3881        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
3882        Init = Constant::getNullValue(ATy->getElementType());
3883      } else {
3884        llvm_unreachable("Unknown constant aggregate type!");
3885      }
3886      return 0;
3887    } else {
3888      return 0; // Unknown initializer type
3889    }
3890  }
3891  return Init;
3892}
3893
3894/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3895/// 'icmp op load X, cst', try to see if we can compute the backedge
3896/// execution count.
3897ScalarEvolution::BackedgeTakenInfo
3898ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3899                                                LoadInst *LI,
3900                                                Constant *RHS,
3901                                                const Loop *L,
3902                                                ICmpInst::Predicate predicate) {
3903  if (LI->isVolatile()) return getCouldNotCompute();
3904
3905  // Check to see if the loaded pointer is a getelementptr of a global.
3906  // TODO: Use SCEV instead of manually grubbing with GEPs.
3907  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3908  if (!GEP) return getCouldNotCompute();
3909
3910  // Make sure that it is really a constant global we are gepping, with an
3911  // initializer, and make sure the first IDX is really 0.
3912  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3913  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
3914      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3915      !cast<Constant>(GEP->getOperand(1))->isNullValue())
3916    return getCouldNotCompute();
3917
3918  // Okay, we allow one non-constant index into the GEP instruction.
3919  Value *VarIdx = 0;
3920  std::vector<ConstantInt*> Indexes;
3921  unsigned VarIdxNum = 0;
3922  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3923    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3924      Indexes.push_back(CI);
3925    } else {
3926      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
3927      VarIdx = GEP->getOperand(i);
3928      VarIdxNum = i-2;
3929      Indexes.push_back(0);
3930    }
3931
3932  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
3933  // Check to see if X is a loop-variant value now.
3934  const SCEV *Idx = getSCEV(VarIdx);
3935  Idx = getSCEVAtScope(Idx, L);
3936
3937  // We can only recognize very limited forms of loop index expressions, in
3938  // particular, only affine AddRec's like {C1,+,C2}.
3939  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3940  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3941      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3942      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3943    return getCouldNotCompute();
3944
3945  unsigned MaxSteps = MaxBruteForceIterations;
3946  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3947    ConstantInt *ItCst = ConstantInt::get(
3948                           cast<IntegerType>(IdxExpr->getType()), IterationNum);
3949    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3950
3951    // Form the GEP offset.
3952    Indexes[VarIdxNum] = Val;
3953
3954    Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
3955    if (Result == 0) break;  // Cannot compute!
3956
3957    // Evaluate the condition for this iteration.
3958    Result = ConstantExpr::getICmp(predicate, Result, RHS);
3959    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
3960    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3961#if 0
3962      dbgs() << "\n***\n*** Computed loop count " << *ItCst
3963             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3964             << "***\n";
3965#endif
3966      ++NumArrayLenItCounts;
3967      return getConstant(ItCst);   // Found terminating iteration!
3968    }
3969  }
3970  return getCouldNotCompute();
3971}
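// Worked example (illustrative, with a hypothetical constant global Tab):
//   static const int Tab[5] = {3, 1, 4, 0, 5};
//   for (i = 0; Tab[i] != 0; ++i) ...
// The index is the affine AddRec {0,+,1}; brute-force evaluation of
// Tab[i] != 0 at i = 0, 1, 2, 3 yields true, true, true, false, so the
// loop's backedge-taken count is reported as the constant 3.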
3972
3973
3974/// CanConstantFold - Return true if we can constant fold an instruction of the
3975/// specified type, assuming that all operands were constants.
3976static bool CanConstantFold(const Instruction *I) {
3977  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3978      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3979    return true;
3980
3981  if (const CallInst *CI = dyn_cast<CallInst>(I))
3982    if (const Function *F = CI->getCalledFunction())
3983      return canConstantFoldCallTo(F);
3984  return false;
3985}
3986
3987/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3988/// in the loop that V is derived from.  We allow arbitrary operations along the
3989/// way, but the operands of an operation must either be constants or a value
3990/// derived from a constant PHI.  If this expression does not fit with these
3991/// constraints, return null.
3992static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3993  // If this is not an instruction, or if this is an instruction outside of the
3994  // loop, it can't be derived from a loop PHI.
3995  Instruction *I = dyn_cast<Instruction>(V);
3996  if (I == 0 || !L->contains(I)) return 0;
3997
3998  if (PHINode *PN = dyn_cast<PHINode>(I)) {
3999    if (L->getHeader() == I->getParent())
4000      return PN;
4001    else
4002      // We don't currently keep track of the control flow needed to evaluate
4003      // PHIs, so we cannot handle PHIs inside of loops.
4004      return 0;
4005  }
4006
4007  // If we won't be able to constant fold this expression even if the operands
4008  // are constants, return early.
4009  if (!CanConstantFold(I)) return 0;
4010
4011  // Otherwise, we can evaluate this instruction if all of its operands are
4012  // constant or derived from a PHI node themselves.
4013  PHINode *PHI = 0;
4014  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
4015    if (!(isa<Constant>(I->getOperand(Op)) ||
4016          isa<GlobalValue>(I->getOperand(Op)))) {
4017      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
4018      if (P == 0) return 0;  // Not evolving from PHI
4019      if (PHI == 0)
4020        PHI = P;
4021      else if (PHI != P)
4022        return 0;  // Evolving from multiple different PHIs.
4023    }
4024
4025  // This is an expression evolving from a constant PHI!
4026  return PHI;
4027}
4028
4029/// EvaluateExpression - Given an expression that passes the
4030/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
4031/// in the loop has the value PHIVal.  If we can't fold this expression for some
4032/// reason, return null.
4033static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
4034                                    const TargetData *TD) {
4035  if (isa<PHINode>(V)) return PHIVal;
4036  if (Constant *C = dyn_cast<Constant>(V)) return C;
4037  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
4038  Instruction *I = cast<Instruction>(V);
4039
4040  std::vector<Constant*> Operands;
4041  Operands.resize(I->getNumOperands());
4042
4043  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4044    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
4045    if (Operands[i] == 0) return 0;
4046  }
4047
4048  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4049    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
4050                                           Operands[1], TD);
4051  return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4052                                  &Operands[0], Operands.size(), TD);
4053}
4054
4055/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
4056/// in the header of its containing loop, that the loop executes a
4057/// constant number of times, and that the PHI node is just a recurrence
4058/// involving constants, fold it.
4059Constant *
4060ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
4061                                                   const APInt &BEs,
4062                                                   const Loop *L) {
4063  std::map<PHINode*, Constant*>::iterator I =
4064    ConstantEvolutionLoopExitValue.find(PN);
4065  if (I != ConstantEvolutionLoopExitValue.end())
4066    return I->second;
4067
4068  if (BEs.ugt(MaxBruteForceIterations))
4069    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
4070
4071  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
4072
4073  // Since the loop is canonicalized, the PHI node must have two entries.  One
4074  // entry must be a constant (coming in from outside of the loop), and the
4075  // second must be derived from the same PHI.
4076  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4077  Constant *StartCST =
4078    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4079  if (StartCST == 0)
4080    return RetVal = 0;  // Must be a constant.
4081
4082  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4083  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
4084  if (PN2 != PN)
4085    return RetVal = 0;  // Not derived from same PHI.
4086
4087  // Execute the loop symbolically to determine the exit value.
4088  if (BEs.getActiveBits() >= 32)
4089    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
4090
4091  unsigned NumIterations = BEs.getZExtValue(); // must be in range
4092  unsigned IterationNum = 0;
4093  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
4094    if (IterationNum == NumIterations)
4095      return RetVal = PHIVal;  // Got exit value!
4096
4097    // Compute the value of the PHI node for the next iteration.
4098    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4099    if (NextPHI == PHIVal)
4100      return RetVal = NextPHI;  // Stopped evolving!
4101    if (NextPHI == 0)
4102      return 0;        // Couldn't evaluate!
4103    PHIVal = NextPHI;
4104  }
4105}
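// Worked example (illustrative): for a header PHI starting at 1 whose
// backedge value doubles it each time around (NextPHI == PHIVal * 2), a
// backedge-taken count of 5 produces the sequence 1, 2, 4, 8, 16, 32, and
// the loop above returns the constant 32 as the exit value.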
4106
4107/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
4108/// constant number of times (the condition evolves only from constants),
4109/// try to evaluate a few iterations of the loop until the exit
4110/// condition gets a value of ExitWhen (true or false).  If we cannot
4111/// evaluate the trip count of the loop, return getCouldNotCompute().
4112const SCEV *
4113ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
4114                                                       Value *Cond,
4115                                                       bool ExitWhen) {
4116  PHINode *PN = getConstantEvolvingPHI(Cond, L);
4117  if (PN == 0) return getCouldNotCompute();
4118
4119  // Since the loop is canonicalized, the PHI node must have two entries.  One
4120  // entry must be a constant (coming in from outside of the loop), and the
4121  // second must be derived from the same PHI.
4122  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4123  Constant *StartCST =
4124    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4125  if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
4126
4127  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4128  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
4129  if (PN2 != PN) return getCouldNotCompute();  // Not derived from same PHI.
4130
4131  // Okay, we found a PHI node that defines the trip count of this loop.  Execute
4132  // the loop symbolically to determine when the condition gets a value of
4133  // "ExitWhen".
4134  unsigned IterationNum = 0;
4135  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
4136  for (Constant *PHIVal = StartCST;
4137       IterationNum != MaxIterations; ++IterationNum) {
4138    ConstantInt *CondVal =
4139      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD));
4140
4141    // Couldn't symbolically evaluate.
4142    if (!CondVal) return getCouldNotCompute();
4143
4144    if (CondVal->getValue() == uint64_t(ExitWhen)) {
4145      ++NumBruteForceTripCountsComputed;
4146      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
4147    }
4148
4149    // Compute the value of the PHI node for the next iteration.
4150    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4151    if (NextPHI == 0 || NextPHI == PHIVal)
4152      return getCouldNotCompute();// Couldn't evaluate or not making progress...
4153    PHIVal = NextPHI;
4154  }
4155
4156  // Too many iterations were needed to evaluate.
4157  return getCouldNotCompute();
4158}
4159
4160/// getSCEVAtScope - Return a SCEV expression for the specified value
4161/// at the specified scope in the program.  The L value specifies a loop
4162/// nest to evaluate the expression at: null means the top level, and a
4163/// non-null loop means a point immediately inside that loop.
4164///
4165/// This method can be used to compute the exit value for a variable defined
4166/// in a loop by querying what the value will hold in the parent loop.
4167///
4168/// In the case that a relevant loop exit value cannot be computed, the
4169/// original value V is returned.
4170const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
4171  // Check to see if we've folded this expression at this loop before.
4172  std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
4173  std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
4174    Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
4175  if (!Pair.second)
4176    return Pair.first->second ? Pair.first->second : V;
4177
4178  // Otherwise compute it.
4179  const SCEV *C = computeSCEVAtScope(V, L);
4180  ValuesAtScopes[V][L] = C;
4181  return C;
4182}
4183
4184const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
4185  if (isa<SCEVConstant>(V)) return V;
4186
4187  // If this instruction is evolved from a constant-evolving PHI, compute the
4188  // exit value from the loop without using SCEVs.
4189  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
4190    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
4191      const Loop *LI = (*this->LI)[I->getParent()];
4192      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
4193        if (PHINode *PN = dyn_cast<PHINode>(I))
4194          if (PN->getParent() == LI->getHeader()) {
4195            // Okay, there is no closed form solution for the PHI node.  Check
4196            // to see if the loop that contains it has a known backedge-taken
4197            // count.  If so, we may be able to force computation of the exit
4198            // value.
4199            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
4200            if (const SCEVConstant *BTCC =
4201                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
4202              // Okay, we know how many times the containing loop executes.  If
4203              // this is a constant evolving PHI node, get the final value at
4204              // the specified iteration number.
4205              Constant *RV = getConstantEvolutionLoopExitValue(PN,
4206                                                   BTCC->getValue()->getValue(),
4207                                                               LI);
4208              if (RV) return getSCEV(RV);
4209            }
4210          }
4211
4212      // Okay, this is an expression that we cannot symbolically evaluate
4213      // into a SCEV.  Check to see if it's possible to symbolically evaluate
4214      // the arguments into constants, and if so, try to constant propagate the
4215      // result.  This is particularly useful for computing loop exit values.
4216      if (CanConstantFold(I)) {
4217        std::vector<Constant*> Operands;
4218        Operands.reserve(I->getNumOperands());
4219        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4220          Value *Op = I->getOperand(i);
4221          if (Constant *C = dyn_cast<Constant>(Op)) {
4222            Operands.push_back(C);
4223          } else {
4224            // If an operand is non-constant and of a type that is not
4225            // SCEVable (neither integer nor pointer), don't even try to
4226            // analyze it with scev techniques.
4227            if (!isSCEVable(Op->getType()))
4228              return V;
4229
4230            const SCEV *OpV = getSCEVAtScope(Op, L);
4231            if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
4232              Constant *C = SC->getValue();
4233              if (C->getType() != Op->getType())
4234                C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4235                                                                  Op->getType(),
4236                                                                  false),
4237                                          C, Op->getType());
4238              Operands.push_back(C);
4239            } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
4240              if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
4241                if (C->getType() != Op->getType())
4242                  C =
4243                    ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4244                                                                  Op->getType(),
4245                                                                  false),
4246                                          C, Op->getType());
4247                Operands.push_back(C);
4248              } else
4249                return V;
4250            } else {
4251              return V;
4252            }
4253          }
4254        }
4255
4256        Constant *C = 0;
4257        if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4258          C = ConstantFoldCompareInstOperands(CI->getPredicate(),
4259                                              Operands[0], Operands[1], TD);
4260        else
4261          C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4262                                       &Operands[0], Operands.size(), TD);
4263        if (C)
4264          return getSCEV(C);
4265      }
4266    }
4267
4268    // This is some other type of SCEVUnknown, just return it.
4269    return V;
4270  }
4271
4272  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
4273    // Avoid performing the look-up in the common case where the specified
4274    // expression has no loop-variant portions.
4275    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
4276      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4277      if (OpAtScope != Comm->getOperand(i)) {
4278        // Okay, at least one of these operands is loop variant but might be
4279        // foldable.  Build a new instance of the folded commutative expression.
4280        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
4281                                            Comm->op_begin()+i);
4282        NewOps.push_back(OpAtScope);
4283
4284        for (++i; i != e; ++i) {
4285          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4286          NewOps.push_back(OpAtScope);
4287        }
4288        if (isa<SCEVAddExpr>(Comm))
4289          return getAddExpr(NewOps);
4290        if (isa<SCEVMulExpr>(Comm))
4291          return getMulExpr(NewOps);
4292        if (isa<SCEVSMaxExpr>(Comm))
4293          return getSMaxExpr(NewOps);
4294        if (isa<SCEVUMaxExpr>(Comm))
4295          return getUMaxExpr(NewOps);
4296        llvm_unreachable("Unknown commutative SCEV type!");
4297      }
4298    }
4299    // If we got here, all operands are loop invariant.
4300    return Comm;
4301  }
4302
4303  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
4304    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
4305    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
4306    if (LHS == Div->getLHS() && RHS == Div->getRHS())
4307      return Div;   // must be loop invariant
4308    return getUDivExpr(LHS, RHS);
4309  }
4310
4311  // If this is a loop recurrence for a loop that does not contain L, then we
4312  // are dealing with the final value computed by the loop.
4313  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4314    if (!L || !AddRec->getLoop()->contains(L)) {
4315      // To evaluate this recurrence, we need to know how many times the AddRec
4316      // loop iterates.  Compute this now.
4317      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
4318      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
4319
4320      // Then, evaluate the AddRec.
4321      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
4322    }
4323    return AddRec;
4324  }
4325
4326  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
4327    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4328    if (Op == Cast->getOperand())
4329      return Cast;  // must be loop invariant
4330    return getZeroExtendExpr(Op, Cast->getType());
4331  }
4332
4333  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
4334    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4335    if (Op == Cast->getOperand())
4336      return Cast;  // must be loop invariant
4337    return getSignExtendExpr(Op, Cast->getType());
4338  }
4339
4340  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
4341    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4342    if (Op == Cast->getOperand())
4343      return Cast;  // must be loop invariant
4344    return getTruncateExpr(Op, Cast->getType());
4345  }
4346
4347  llvm_unreachable("Unknown SCEV type!");
4348  return 0;
4349}
4350
4351/// getSCEVAtScope - This is a convenience function which does
4352/// getSCEVAtScope(getSCEV(V), L).
4353const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
4354  return getSCEVAtScope(getSCEV(V), L);
4355}
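// Illustrative example: if loop L has a known backedge-taken count of 9,
// then getSCEVAtScope of {0,+,1}<L> at L's parent loop evaluates the AddRec
// at iteration 9 (the AddRec case above), yielding the constant 9 -- the
// value the induction variable holds on the final trip through the loop.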
4356
4357/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
4358/// following equation:
4359///
4360///     A * X = B (mod N)
4361///
4362/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
4363/// A and B isn't important.
4364///
4365/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
4366static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
4367                                               ScalarEvolution &SE) {
4368  uint32_t BW = A.getBitWidth();
4369  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
4370  assert(A != 0 && "A must be non-zero.");
4371
4372  // 1. D = gcd(A, N)
4373  //
4374  // The gcd of A and N may have only one prime factor: 2. The number of
4375  // trailing zeros in A is its multiplicity.
4376  uint32_t Mult2 = A.countTrailingZeros();
4377  // D = 2^Mult2
4378
4379  // 2. Check if B is divisible by D.
4380  //
4381  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
4382  // is not less than the multiplicity of this prime factor for D.
4383  if (B.countTrailingZeros() < Mult2)
4384    return SE.getCouldNotCompute();
4385
4386  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
4387  // modulo (N / D).
4388  //
4389  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
4390  // bit width during computations.
4391  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
4392  APInt Mod(BW + 1, 0);
4393  Mod.set(BW - Mult2);  // Mod = N / D
4394  APInt I = AD.multiplicativeInverse(Mod);
4395
4396  // 4. Compute the minimum unsigned root of the equation:
4397  // I * (B / D) mod (N / D)
4398  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
4399
4400  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
4401  // bits.
4402  return SE.getConstant(Result.trunc(BW));
4403}
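// Worked example (illustrative): solve 4*X == 8 (mod 16), i.e. BW == 4.
//   Mult2 == countTrailingZeros(4) == 2, so D == 4.
//   countTrailingZeros(8) == 3 >= 2, so B is divisible by D.
//   AD == 4/4 == 1, Mod == 16/4 == 4, and I == inverse of 1 (mod 4) == 1.
//   Result == (1 * 8/4) mod 4 == 2; indeed 4*2 == 8 (mod 16), and 2 is the
//   minimum unsigned root (the others are 6, 10, and 14).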
4404
4405/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
4406/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
4407/// might be the same) or two SCEVCouldNotCompute objects.
4408///
4409static std::pair<const SCEV *,const SCEV *>
4410SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
4411  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
4412  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
4413  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
4414  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
4415
4416  // We currently can only solve this if the coefficients are constants.
4417  if (!LC || !MC || !NC) {
4418    const SCEV *CNC = SE.getCouldNotCompute();
4419    return std::make_pair(CNC, CNC);
4420  }
4421
4422  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
4423  const APInt &L = LC->getValue()->getValue();
4424  const APInt &M = MC->getValue()->getValue();
4425  const APInt &N = NC->getValue()->getValue();
4426  APInt Two(BitWidth, 2);
4427  APInt Four(BitWidth, 4);
4428
4429  {
4430    using namespace APIntOps;
4431    const APInt& C = L;
4432    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
4433    // The B coefficient is M-N/2
4434    APInt B(M);
4435    B -= sdiv(N,Two);
4436
4437    // The A coefficient is N/2
4438    APInt A(N.sdiv(Two));
4439
4440    // Compute the B^2-4ac term.
4441    APInt SqrtTerm(B);
4442    SqrtTerm *= B;
4443    SqrtTerm -= Four * (A * C);
4444
4445    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4446    // integer value or else APInt::sqrt() will assert.
4447    APInt SqrtVal(SqrtTerm.sqrt());
4448
4449    // Compute the two solutions for the quadratic formula.
4450    // The divisions must be performed as signed divisions.
4451    APInt NegB(-B);
4452    APInt TwoA( A << 1 );
4453    if (TwoA.isMinValue()) {
4454      const SCEV *CNC = SE.getCouldNotCompute();
4455      return std::make_pair(CNC, CNC);
4456    }
4457
4458    LLVMContext &Context = SE.getContext();
4459
4460    ConstantInt *Solution1 =
4461      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
4462    ConstantInt *Solution2 =
4463      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
4464
4465    return std::make_pair(SE.getConstant(Solution1),
4466                          SE.getConstant(Solution2));
4467  } // end of block using APIntOps
4468}
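// Worked example (illustrative): the quadratic chrec {-6,+,0,+,2} takes the
// values -6, -6, -4, 0, ... because {L,+,M,+,N}(i) == L + M*i + N*i*(i-1)/2.
// In polynomial form A == N/2 == 1, B == M - N/2 == -1, C == L == -6, i.e.
// i^2 - i - 6 == (i - 3)*(i + 2). The discriminant B^2 - 4AC == 25 gives
// SqrtVal == 5, the two solutions are (1 + 5)/2 == 3 and (1 - 5)/2 == -2,
// and the chrec first reaches zero at iteration 3.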
4469
4470/// HowFarToZero - Return the number of times a backedge comparing the specified
4471/// value to zero will execute.  If not computable, return CouldNotCompute.
4472ScalarEvolution::BackedgeTakenInfo
4473ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
4474  // If the value is a constant
4475  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4476    // If the value is already zero, the branch will execute zero times.
4477    if (C->getValue()->isZero()) return C;
4478    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4479  }
4480
4481  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4482  if (!AddRec || AddRec->getLoop() != L)
4483    return getCouldNotCompute();
4484
4485  if (AddRec->isAffine()) {
4486    // If this is an affine expression, the execution count of this branch is
4487    // the minimum unsigned root of the following equation:
4488    //
4489    //     Start + Step*N = 0 (mod 2^BW)
4490    //
4491    // equivalent to:
4492    //
4493    //             Step*N = -Start (mod 2^BW)
4494    //
4495    // where BW is the common bit width of Start and Step.
4496
4497    // Get the initial value for the loop.
4498    const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4499                                       L->getParentLoop());
4500    const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4501                                      L->getParentLoop());
4502
4503    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4504      // For now we handle only constant steps.
4505
4506      // First, handle unitary steps.
4507      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
4508        return getNegativeSCEV(Start);          //   N = -Start (as unsigned)
4509      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
4510        return Start;                           //    N = Start (as unsigned)
4511
4512      // Then, try to solve the above equation provided that Start is constant.
4513      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4514        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4515                                            -StartC->getValue()->getValue(),
4516                                            *this);
4517    }
4518  } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
4519    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4520    // the quadratic equation to solve it.
4521    std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
4522                                                                    *this);
4523    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4524    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4525    if (R1) {
4526#if 0
4527      dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
4528             << "  sol#2: " << *R2 << "\n";
4529#endif
4530      // Pick the smallest positive root value.
4531      if (ConstantInt *CB =
4532          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
4533                                   R1->getValue(), R2->getValue()))) {
4534        if (!CB->getZExtValue())
4535          std::swap(R1, R2);   // R1 is the minimum root now.
4536
4537        // We can only use this value if the chrec ends up with an exact zero
4538        // value at this index.  When solving for "X*X != 5", for example, we
4539        // should not accept a root of 2.
4540        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4541        if (Val->isZero())
4542          return R1;  // We found a quadratic root!
4543      }
4544    }
4545  }
4546
4547  return getCouldNotCompute();
4548}
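// Illustrative example of the unit-stride case above: in 8-bit arithmetic
// the AddRec {250,+,1} satisfies N == -Start (mod 256), so HowFarToZero
// reports -250 == 6 steps: 250, 251, ..., 255, 0.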
4549
4550/// HowFarToNonZero - Return the number of times a backedge checking the
4551/// specified value for nonzero will execute.  If not computable, return
4552/// CouldNotCompute
4553ScalarEvolution::BackedgeTakenInfo
4554ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4555  // Loops that look like: while (X == 0) are very strange indeed.  We don't
4556  // handle them yet except for the trivial case.  This could be expanded in the
4557  // future as needed.
4558
4559  // If the value is a constant, check to see if it is known to be non-zero
4560  // already.  If so, the backedge will execute zero times.
4561  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4562    if (!C->getValue()->isNullValue())
4563      return getIntegerSCEV(0, C->getType());
4564    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4565  }
4566
4567  // We could implement others, but I really doubt anyone writes loops like
4568  // this, and if they did, they would already be constant folded.
4569  return getCouldNotCompute();
4570}
4571
4572/// getLoopPredecessor - If the given loop's header has exactly one unique
4573/// predecessor outside the loop, return it. Otherwise return null.
4574///
4575BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
4576  BasicBlock *Header = L->getHeader();
4577  BasicBlock *Pred = 0;
4578  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
4579       PI != E; ++PI)
4580    if (!L->contains(*PI)) {
4581      if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4582      Pred = *PI;
4583    }
4584  return Pred;
4585}
4586
4587/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4588/// (which may not be an immediate predecessor) which has exactly one
4589/// successor from which BB is reachable, or null if no such block is
4590/// found.
4591///
4592BasicBlock *
4593ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4594  // If the block has a unique predecessor, then there is no path from the
4595  // predecessor to the block that does not go through the direct edge
4596  // from the predecessor to the block.
4597  if (BasicBlock *Pred = BB->getSinglePredecessor())
4598    return Pred;
4599
4600  // A loop's header is defined to be a block that dominates the loop.
4601  // If the header has a unique predecessor outside the loop, it must be
4602  // a block that has exactly one successor that can reach the loop.
4603  if (Loop *L = LI->getLoopFor(BB))
4604    return getLoopPredecessor(L);
4605
4606  return 0;
4607}
4608
4609/// HasSameValue - SCEV structural equivalence is usually sufficient for
4610/// testing whether two expressions are equal.  However, for the purposes of
4611/// looking for a condition guarding a loop, it can be useful to be a little
4612/// more general, since a front-end may have replicated the controlling
4613/// expression.
4614///
4615static bool HasSameValue(const SCEV *A, const SCEV *B) {
4616  // Quick check to see if they are the same SCEV.
4617  if (A == B) return true;
4618
4619  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4620  // two different instructions with the same value. Check for this case.
4621  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4622    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4623      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4624        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4625          if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
4626            return true;
4627
4628  // Otherwise assume they may have a different value.
4629  return false;
4630}
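// Illustrative example: if a front-end emitted two separate but identical
// "add i32 %a, %b" instructions, each wrapped in its own SCEVUnknown, the
// isIdenticalTo check above recognizes them as computing the same value.
// Loads are excluded because memory may change between the two executions.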
4631
4632bool ScalarEvolution::isKnownNegative(const SCEV *S) {
4633  return getSignedRange(S).getSignedMax().isNegative();
4634}
4635
4636bool ScalarEvolution::isKnownPositive(const SCEV *S) {
4637  return getSignedRange(S).getSignedMin().isStrictlyPositive();
4638}
4639
4640bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
4641  return !getSignedRange(S).getSignedMin().isNegative();
4642}
4643
4644bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
4645  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
4646}
4647
4648bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
4649  return isKnownNegative(S) || isKnownPositive(S);
4650}
4651
4652bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
4653                                       const SCEV *LHS, const SCEV *RHS) {
4654
4655  if (HasSameValue(LHS, RHS))
4656    return ICmpInst::isTrueWhenEqual(Pred);
4657
4658  switch (Pred) {
4659  default:
4660    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4661    break;
4662  case ICmpInst::ICMP_SGT:
4663    Pred = ICmpInst::ICMP_SLT;
4664    std::swap(LHS, RHS);
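    // Deliberate fall-through into the SLT case; the other swapped
    // predicates below fall through to their inverted cases the same way.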
4665  case ICmpInst::ICMP_SLT: {
4666    ConstantRange LHSRange = getSignedRange(LHS);
4667    ConstantRange RHSRange = getSignedRange(RHS);
4668    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
4669      return true;
4670    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
4671      return false;
4672    break;
4673  }
4674  case ICmpInst::ICMP_SGE:
4675    Pred = ICmpInst::ICMP_SLE;
4676    std::swap(LHS, RHS);
4677  case ICmpInst::ICMP_SLE: {
4678    ConstantRange LHSRange = getSignedRange(LHS);
4679    ConstantRange RHSRange = getSignedRange(RHS);
4680    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
4681      return true;
4682    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
4683      return false;
4684    break;
4685  }
4686  case ICmpInst::ICMP_UGT:
4687    Pred = ICmpInst::ICMP_ULT;
4688    std::swap(LHS, RHS);
4689  case ICmpInst::ICMP_ULT: {
4690    ConstantRange LHSRange = getUnsignedRange(LHS);
4691    ConstantRange RHSRange = getUnsignedRange(RHS);
4692    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
4693      return true;
4694    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
4695      return false;
4696    break;
4697  }
4698  case ICmpInst::ICMP_UGE:
4699    Pred = ICmpInst::ICMP_ULE;
4700    std::swap(LHS, RHS);
4701  case ICmpInst::ICMP_ULE: {
4702    ConstantRange LHSRange = getUnsignedRange(LHS);
4703    ConstantRange RHSRange = getUnsignedRange(RHS);
4704    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
4705      return true;
4706    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
4707      return false;
4708    break;
4709  }
4710  case ICmpInst::ICMP_NE: {
4711    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
4712      return true;
4713    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
4714      return true;
4715
4716    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4717    if (isKnownNonZero(Diff))
4718      return true;
4719    break;
4720  }
4721  case ICmpInst::ICMP_EQ:
4722    // The check at the top of the function catches the case where
4723    // the values are known to be equal.
4724    break;
4725  }
4726  return false;
4727}
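// Worked example (illustrative): if getSignedRange(LHS) is [0,10) and
// getSignedRange(RHS) is [20,30), then for ICMP_SLT the test
// "LHSRange.getSignedMax() == 9 slt 20 == RHSRange.getSignedMin()" succeeds
// and LHS <s RHS is known to hold on every execution.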
4728
4729/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
4730/// protected by a conditional between LHS and RHS.  This is used to
4731/// to eliminate casts.
4732bool
4733ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
4734                                             ICmpInst::Predicate Pred,
4735                                             const SCEV *LHS, const SCEV *RHS) {
4736  // Interpret a null as meaning no loop, where there is obviously no guard
4737  // (interprocedural conditions notwithstanding).
4738  if (!L) return true;
4739
4740  BasicBlock *Latch = L->getLoopLatch();
4741  if (!Latch)
4742    return false;
4743
4744  BranchInst *LoopContinuePredicate =
4745    dyn_cast<BranchInst>(Latch->getTerminator());
4746  if (!LoopContinuePredicate ||
4747      LoopContinuePredicate->isUnconditional())
4748    return false;
4749
4750  return isImpliedCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
4751                       LoopContinuePredicate->getSuccessor(0) != L->getHeader());
4752}
4753
4754/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
4755/// by a conditional between LHS and RHS.  This is used to help avoid max
4756/// expressions in loop trip counts, and to eliminate casts.
4757bool
4758ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
4759                                          ICmpInst::Predicate Pred,
4760                                          const SCEV *LHS, const SCEV *RHS) {
4761  // Interpret a null as meaning no loop, where there is obviously no guard
4762  // (interprocedural conditions notwithstanding).
4763  if (!L) return false;
4764
4765  BasicBlock *Predecessor = getLoopPredecessor(L);
4766  BasicBlock *PredecessorDest = L->getHeader();
4767
4768  // Starting at the loop predecessor, climb up the predecessor chain, as long
4769  // as there are predecessors that can be found that have unique successors
4770  // leading to the original header.
4771  for (; Predecessor;
4772       PredecessorDest = Predecessor,
4773       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
4774
4775    BranchInst *LoopEntryPredicate =
4776      dyn_cast<BranchInst>(Predecessor->getTerminator());
4777    if (!LoopEntryPredicate ||
4778        LoopEntryPredicate->isUnconditional())
4779      continue;
4780
4781    if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
4782                      LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
4783      return true;
4784  }
4785
4786  return false;
4787}
4788
4789/// isImpliedCond - Test whether the condition described by Pred, LHS,
4790/// and RHS is true whenever the given Cond value evaluates to true.
4791bool ScalarEvolution::isImpliedCond(Value *CondValue,
4792                                    ICmpInst::Predicate Pred,
4793                                    const SCEV *LHS, const SCEV *RHS,
4794                                    bool Inverse) {
4795  // Recursively handle And and Or conditions.
4796  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
4797    if (BO->getOpcode() == Instruction::And) {
4798      if (!Inverse)
4799        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4800               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4801    } else if (BO->getOpcode() == Instruction::Or) {
4802      if (Inverse)
4803        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4804               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4805    }
4806  }
4807
4808  ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
4809  if (!ICI) return false;
4810
4811  // Bail if the ICmp's operands' types are wider than the needed type
4812  // before attempting to call getSCEV on them. This avoids infinite
4813  // recursion, since the analysis of widening casts can require loop
4814  // exit condition information for overflow checking, which would
4815  // lead back here.
4816  if (getTypeSizeInBits(LHS->getType()) <
4817      getTypeSizeInBits(ICI->getOperand(0)->getType()))
4818    return false;
4819
4820  // Now that we found a conditional branch that dominates the loop, check to
4821  // see if it is the comparison we are looking for.
4822  ICmpInst::Predicate FoundPred;
4823  if (Inverse)
4824    FoundPred = ICI->getInversePredicate();
4825  else
4826    FoundPred = ICI->getPredicate();
4827
4828  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
4829  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
4830
4831  // Balance the types. The case where FoundLHS' type is wider than
4832  // LHS' type is checked for above.
4833  if (getTypeSizeInBits(LHS->getType()) >
4834      getTypeSizeInBits(FoundLHS->getType())) {
4835    if (CmpInst::isSigned(Pred)) {
4836      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
4837      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
4838    } else {
4839      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
4840      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
4841    }
4842  }
4843
4844  // Canonicalize the query to match the way instcombine will have
4845  // canonicalized the comparison.
4846  // First, put a constant operand on the right.
4847  if (isa<SCEVConstant>(LHS)) {
4848    std::swap(LHS, RHS);
4849    Pred = ICmpInst::getSwappedPredicate(Pred);
4850  }
4851  // Then, canonicalize comparisons with boundary cases.
4852  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
4853    const APInt &RA = RC->getValue()->getValue();
4854    switch (Pred) {
4855    default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4856    case ICmpInst::ICMP_EQ:
4857    case ICmpInst::ICMP_NE:
4858      break;
4859    case ICmpInst::ICMP_UGE:
4860      if ((RA - 1).isMinValue()) {
4861        Pred = ICmpInst::ICMP_NE;
4862        RHS = getConstant(RA - 1);
4863        break;
4864      }
4865      if (RA.isMaxValue()) {
4866        Pred = ICmpInst::ICMP_EQ;
4867        break;
4868      }
4869      if (RA.isMinValue()) return true;
4870      break;
4871    case ICmpInst::ICMP_ULE:
4872      if ((RA + 1).isMaxValue()) {
4873        Pred = ICmpInst::ICMP_NE;
4874        RHS = getConstant(RA + 1);
4875        break;
4876      }
4877      if (RA.isMinValue()) {
4878        Pred = ICmpInst::ICMP_EQ;
4879        break;
4880      }
4881      if (RA.isMaxValue()) return true;
4882      break;
4883    case ICmpInst::ICMP_SGE:
4884      if ((RA - 1).isMinSignedValue()) {
4885        Pred = ICmpInst::ICMP_NE;
4886        RHS = getConstant(RA - 1);
4887        break;
4888      }
4889      if (RA.isMaxSignedValue()) {
4890        Pred = ICmpInst::ICMP_EQ;
4891        break;
4892      }
4893      if (RA.isMinSignedValue()) return true;
4894      break;
4895    case ICmpInst::ICMP_SLE:
4896      if ((RA + 1).isMaxSignedValue()) {
4897        Pred = ICmpInst::ICMP_NE;
4898        RHS = getConstant(RA + 1);
4899        break;
4900      }
4901      if (RA.isMinSignedValue()) {
4902        Pred = ICmpInst::ICMP_EQ;
4903        break;
4904      }
4905      if (RA.isMaxSignedValue()) return true;
4906      break;
4907    case ICmpInst::ICMP_UGT:
4908      if (RA.isMinValue()) {
4909        Pred = ICmpInst::ICMP_NE;
4910        break;
4911      }
4912      if ((RA + 1).isMaxValue()) {
4913        Pred = ICmpInst::ICMP_EQ;
4914        RHS = getConstant(RA + 1);
4915        break;
4916      }
4917      if (RA.isMaxValue()) return false;
4918      break;
4919    case ICmpInst::ICMP_ULT:
4920      if (RA.isMaxValue()) {
4921        Pred = ICmpInst::ICMP_NE;
4922        break;
4923      }
4924      if ((RA - 1).isMinValue()) {
4925        Pred = ICmpInst::ICMP_EQ;
4926        RHS = getConstant(RA - 1);
4927        break;
4928      }
4929      if (RA.isMinValue()) return false;
4930      break;
4931    case ICmpInst::ICMP_SGT:
4932      if (RA.isMinSignedValue()) {
4933        Pred = ICmpInst::ICMP_NE;
4934        break;
4935      }
4936      if ((RA + 1).isMaxSignedValue()) {
4937        Pred = ICmpInst::ICMP_EQ;
4938        RHS = getConstant(RA + 1);
4939        break;
4940      }
4941      if (RA.isMaxSignedValue()) return false;
4942      break;
4943    case ICmpInst::ICMP_SLT:
4944      if (RA.isMaxSignedValue()) {
4945        Pred = ICmpInst::ICMP_NE;
4946        break;
4947      }
4948      if ((RA - 1).isMinSignedValue()) {
4949        Pred = ICmpInst::ICMP_EQ;
4950        RHS = getConstant(RA - 1);
4951        break;
4952      }
4953      if (RA.isMinSignedValue()) return false;
4954      break;
4955    }
4956  }
4957
4958  // Check to see if we can make the LHS or RHS match.
4959  if (LHS == FoundRHS || RHS == FoundLHS) {
4960    if (isa<SCEVConstant>(RHS)) {
4961      std::swap(FoundLHS, FoundRHS);
4962      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
4963    } else {
4964      std::swap(LHS, RHS);
4965      Pred = ICmpInst::getSwappedPredicate(Pred);
4966    }
4967  }
4968
4969  // Check whether the found predicate is the same as the desired predicate.
4970  if (FoundPred == Pred)
4971    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
4972
4973  // Check whether swapping the found predicate makes it the same as the
4974  // desired predicate.
4975  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
4976    if (isa<SCEVConstant>(RHS))
4977      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
4978    else
4979      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
4980                                   RHS, LHS, FoundLHS, FoundRHS);
4981  }
4982
4983  // Check whether the actual condition is beyond sufficient.
4984  if (FoundPred == ICmpInst::ICMP_EQ)
4985    if (ICmpInst::isTrueWhenEqual(Pred))
4986      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
4987        return true;
4988  if (Pred == ICmpInst::ICMP_NE)
4989    if (!ICmpInst::isTrueWhenEqual(FoundPred))
4990      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
4991        return true;
4992
4993  // Otherwise assume the worst.
4994  return false;
4995}
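// Illustrative example of the boundary-case canonicalization above: a
// condition "X u<= 0" has RA == 0 (the minimum value) and is rewritten to
// "X == 0", while "X u>= 1" has (RA - 1) == 0 and is rewritten to "X != 0",
// matching the forms instcombine produces.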
4996
4997/// isImpliedCondOperands - Test whether the condition described by Pred,
4998/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
4999/// and FoundRHS is true.
5000bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
5001                                            const SCEV *LHS, const SCEV *RHS,
5002                                            const SCEV *FoundLHS,
5003                                            const SCEV *FoundRHS) {
5004  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
5005                                     FoundLHS, FoundRHS) ||
5006         // ~x < ~y --> x > y
5007         isImpliedCondOperandsHelper(Pred, LHS, RHS,
5008                                     getNotSCEV(FoundRHS),
5009                                     getNotSCEV(FoundLHS));
5010}
5011
5012/// isImpliedCondOperandsHelper - Test whether the condition described by
5013/// Pred, LHS, and RHS is true whenever the condition described by Pred,
5014/// FoundLHS, and FoundRHS is true.
5015bool
5016ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
5017                                             const SCEV *LHS, const SCEV *RHS,
5018                                             const SCEV *FoundLHS,
5019                                             const SCEV *FoundRHS) {
5020  switch (Pred) {
5021  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5022  case ICmpInst::ICMP_EQ:
5023  case ICmpInst::ICMP_NE:
5024    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
5025      return true;
5026    break;
5027  case ICmpInst::ICMP_SLT:
5028  case ICmpInst::ICMP_SLE:
5029    if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
5030        isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
5031      return true;
5032    break;
5033  case ICmpInst::ICMP_SGT:
5034  case ICmpInst::ICMP_SGE:
5035    if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
5036        isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
5037      return true;
5038    break;
5039  case ICmpInst::ICMP_ULT:
5040  case ICmpInst::ICMP_ULE:
5041    if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
5042        isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
5043      return true;
5044    break;
5045  case ICmpInst::ICMP_UGT:
5046  case ICmpInst::ICMP_UGE:
5047    if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
5048        isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
5049      return true;
5050    break;
5051  }
5052
5053  return false;
5054}
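// Worked example (illustrative): suppose "i <s n" is the known condition
// (FoundLHS == i, FoundRHS == n). To show "i - 1 <s n + 1", the SLT case
// above asks isKnownPredicate for i - 1 <=s i and n + 1 >=s n; when the
// value ranges rule out wrap, both hold and the implication follows.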
5055
5056/// getBECount - Subtract the end and start values and divide by the step,
5057/// rounding up, to get the number of times the backedge is executed. Return
5058/// CouldNotCompute if an intermediate computation overflows.
5059const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
5060                                        const SCEV *End,
5061                                        const SCEV *Step,
5062                                        bool NoWrap) {
5063  assert(!isKnownNegative(Step) &&
5064         "This code doesn't handle negative strides yet!");
5065
5066  const Type *Ty = Start->getType();
5067  const SCEV *NegOne = getIntegerSCEV(-1, Ty);
5068  const SCEV *Diff = getMinusSCEV(End, Start);
5069  const SCEV *RoundUp = getAddExpr(Step, NegOne);
5070
5071  // Add an adjustment to the difference between End and Start so that
5072  // the division will effectively round up.
5073  const SCEV *Add = getAddExpr(Diff, RoundUp);
5074
5075  if (!NoWrap) {
5076    // Check Add for unsigned overflow.
5077    // TODO: More sophisticated things could be done here.
5078    const Type *WideTy = IntegerType::get(getContext(),
5079                                          getTypeSizeInBits(Ty) + 1);
5080    const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
5081    const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
5082    const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
5083    if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
5084      return getCouldNotCompute();
5085  }
5086
5087  return getUDivExpr(Add, Step);
5088}
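// Worked example (illustrative): Start == 0, End == 10, Step == 3 gives
// Add == (10 - 0) + (3 - 1) == 12 and a count of 12 udiv 3 == 4, the
// ceiling of 10/3. The overflow check redoes the sum in BW+1 bits; if
// zero-extending the narrow sum disagrees with OperandExtendedAdd, the
// narrow addition wrapped and CouldNotCompute is returned instead.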
5089
5090/// HowManyLessThans - Return the number of times a backedge containing the
5091/// specified less-than comparison will execute.  If not computable, return
5092/// CouldNotCompute.
5093ScalarEvolution::BackedgeTakenInfo
5094ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
5095                                  const Loop *L, bool isSigned) {
5096  // Only handle:  "ADDREC < LoopInvariant".
5097  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
5098
5099  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
5100  if (!AddRec || AddRec->getLoop() != L)
5101    return getCouldNotCompute();
5102
5103  // Check to see if we have a flag which makes analysis easy.
5104  bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() :
5105                           AddRec->hasNoUnsignedWrap();
5106
5107  if (AddRec->isAffine()) {
5108    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
5109    const SCEV *Step = AddRec->getStepRecurrence(*this);
5110
5111    if (Step->isZero())
5112      return getCouldNotCompute();
5113    if (Step->isOne()) {
5114      // With unit stride, the iteration never steps past the limit value.
5115    } else if (isKnownPositive(Step)) {
5116      // Test whether an iteration with a positive stride can step past
5117      // the limit value and past the maximum value for its type in a
5118      // single step. Note that checking NoWrap is not sufficient here:
5119      // the value produced after a wrap is merely undefined, not undefined
5120      // behavior, so if a wrap does occur the loop could either terminate
5121      // or run forever. In either case, though, the loop is guaranteed to
5122      // iterate at least up to the iteration in which the wrap occurs.
5123      const SCEV *One = getIntegerSCEV(1, Step->getType());
5124      if (isSigned) {
5125        APInt Max = APInt::getSignedMaxValue(BitWidth);
5126        if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
5127              .slt(getSignedRange(RHS).getSignedMax()))
5128          return getCouldNotCompute();
5129      } else {
5130        APInt Max = APInt::getMaxValue(BitWidth);
5131        if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
5132              .ult(getUnsignedRange(RHS).getUnsignedMax()))
5133          return getCouldNotCompute();
5134      }
5135    } else
5136      // TODO: Handle negative strides here and below.
5137      return getCouldNotCompute();
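    // Worked example for the positive-stride check, with hypothetical
    // values: for a signed 8-bit IV with Step = 100,
    // Max - (Step - 1) = 127 - 99 = 28. If RHS can be as large as 50,
    // then an iteration at a value in [28, 49] is still below the limit
    // but would step past 127 and wrap, so we conservatively give up.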
5138
5139    // We know the LHS is of the form {n,+,s} and the RHS is some loop-
5140    // invariant value m, so we count the number of iterations in which
5141    // {n,+,s} < m is true. Note that we cannot simply return max(m-n,0)/s:
5142    // it's not safe to treat m-n as either signed or unsigned, due to overflow.
5143
5144    // First, we get the value of the LHS in the first iteration: n
5145    const SCEV *Start = AddRec->getOperand(0);
5146
5147    // Determine the minimum constant start value.
5148    const SCEV *MinStart = getConstant(isSigned ?
5149      getSignedRange(Start).getSignedMin() :
5150      getUnsignedRange(Start).getUnsignedMin());
5151
5152    // If the condition is known to hold on entry to the loop, then the
5153    // loop runs exactly (m-n)/s times. Otherwise, we only know that it
5154    // executes (max(m,n)-n)/s times. In both cases, the division must
5155    // round up.
5156    const SCEV *End = RHS;
5157    if (!isLoopEntryGuardedByCond(L,
5158                                  isSigned ? ICmpInst::ICMP_SLT :
5159                                             ICmpInst::ICMP_ULT,
5160                                  getMinusSCEV(Start, Step), RHS))
5161      End = isSigned ? getSMaxExpr(RHS, Start)
5162                     : getUMaxExpr(RHS, Start);
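    // Illustrative example with hypothetical values: for an unsigned loop
    // with Start = 20 and RHS = 10 where the entry guard cannot be
    // established, End = umax(10, 20) = 20, and the division below then
    // yields a backedge-taken count of zero.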
5163
5164    // Determine the maximum constant end value.
5165    const SCEV *MaxEnd = getConstant(isSigned ?
5166      getSignedRange(End).getSignedMax() :
5167      getUnsignedRange(End).getUnsignedMax());
5168
5169    // If MaxEnd is within a step of the maximum integer value in its type,
5170    // adjust it down to the minimum value which would produce the same effect.
5171    // This allows the subsequent ceiling division of (N+(step-1))/step to
5172    // compute the correct value.
5173    const SCEV *StepMinusOne = getMinusSCEV(Step,
5174                                            getIntegerSCEV(1, Step->getType()));
5175    MaxEnd = isSigned ?
5176      getSMinExpr(MaxEnd,
5177                  getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
5178                               StepMinusOne)) :
5179      getUMinExpr(MaxEnd,
5180                  getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
5181                               StepMinusOne));
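    // Illustrative example with hypothetical values: for an unsigned 8-bit
    // IV with Step = 10, MaxEnd is clamped to at most 255 - 9 = 246. This
    // guarantees (MaxEnd - MinStart) + (Step - 1) <= 255, so the ceiling
    // division in getBECount cannot wrap.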
5182
5183    // Finally, we subtract these two values and divide, rounding up, to get
5184    // the number of times the backedge is executed.
5185    const SCEV *BECount = getBECount(Start, End, Step, NoWrap);
5186
5187    // The maximum backedge count is similar, except using the minimum start
5188    // value and the maximum end value.
5189    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap);
5190
5191    return BackedgeTakenInfo(BECount, MaxBECount);
5192  }
5193
5194  return getCouldNotCompute();
5195}
5196
5197/// getNumIterationsInRange - Return the number of iterations of this loop that
5198/// produce values in the specified constant range.  Another way of looking at
5199/// this is that it returns the first iteration number where the value is not
5200/// in the range, thus computing the exit count. If the iteration count can't
5201/// be computed, an instance of SCEVCouldNotCompute is returned.
5202const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
5203                                                    ScalarEvolution &SE) const {
5204  if (Range.isFullSet())  // Infinite loop.
5205    return SE.getCouldNotCompute();
5206
5207  // If the start is a non-zero constant, shift the range to simplify things.
5208  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
5209    if (!SC->getValue()->isZero()) {
5210      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
5211      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
5212      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
5213      if (const SCEVAddRecExpr *ShiftedAddRec =
5214            dyn_cast<SCEVAddRecExpr>(Shifted))
5215        return ShiftedAddRec->getNumIterationsInRange(
5216                           Range.subtract(SC->getValue()->getValue()), SE);
5217      // This is strange and shouldn't happen.
5218      return SE.getCouldNotCompute();
5219    }
5220
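  // Illustrative example with hypothetical values: for the chrec {3,+,1}
  // and Range = [5,10), we recurse on {0,+,1} with the shifted range
  // [2,7); subtracting the constant start from both the chrec and the
  // range leaves the iteration numbers unchanged.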
5221  // The only time we can solve this is when we have all constant indices.
5222  // Otherwise, we cannot determine the overflow conditions.
5223  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
5224    if (!isa<SCEVConstant>(getOperand(i)))
5225      return SE.getCouldNotCompute();
5226
5227
5228  // Okay, at this point we know that all elements of the chrec are
5229  // constants and that the start element is zero.
5230
5231  // First check to see if the range contains zero.  If not, the first
5232  // iteration exits.
5233  unsigned BitWidth = SE.getTypeSizeInBits(getType());
5234  if (!Range.contains(APInt(BitWidth, 0)))
5235    return SE.getIntegerSCEV(0, getType());
5236
5237  if (isAffine()) {
5238    // If this is an affine expression then we have this situation:
5239    //   Solve {0,+,A} in Range  ===  Ax in Range
5240
5241    // We know that zero is in the range.  If A is positive then we know that
5242    // the upper value of the range must be the first possible exit value.
5243    // If A is negative then the lower bound of the range is the last
5244    // possible loop value.  Also note that we already checked for a full range.
5245    APInt One(BitWidth, 1);
5246    APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
5247    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
5248
5249    // The exit value should be (End+A)/A.
5250    APInt ExitVal = (End + A).udiv(A);
5251    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
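    // Worked example with hypothetical values: for {0,+,2} and
    // Range = [0,5), A = 2 and End = 5 - 1 = 4, so
    // ExitVal = (4 + 2) /u 2 = 3. The chrec takes the values 0, 2, 4 on
    // iterations 0..2 (all in the range) and 6 on iteration 3, the first
    // iteration whose value is out of the range.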
5252
5253    // Evaluate at the exit value.  If we really did fall out of the valid
5254    // range, then we computed our trip count, otherwise wrap around or other
5255    // things must have happened.
5256    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
5257    if (Range.contains(Val->getValue()))
5258      return SE.getCouldNotCompute();  // Something strange happened
5259
5260    // Ensure that the previous value is in the range.  This is a sanity check.
5261    assert(Range.contains(
5262           EvaluateConstantChrecAtConstant(this,
5263           ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
5264           "Linear scev computation is off in a bad way!");
5265    return SE.getConstant(ExitValue);
5266  } else if (isQuadratic()) {
5267    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
5268    // quadratic equation to solve it.  To do this, we must frame our problem in
5269    // terms of figuring out when zero is crossed, instead of when
5270    // Range.getUpper() is crossed.
5271    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
5272    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
5273    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
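    // For example (hypothetical values): {0,+,1,+,2} evaluates to n*n at
    // iteration n, so with Range = [0,5) the rewritten chrec is
    // {-5,+,1,+,2}; its real-valued root sqrt(5) ~= 2.24 falls between
    // iterations 2 and 3, and the off-by-one checks below settle on the
    // first out-of-range iteration, 3.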
5274
5275    // Next, solve the constructed addrec
5276    std::pair<const SCEV *,const SCEV *> Roots =
5277      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
5278    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
5279    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
5280    if (R1 && R2) {
5281      // Pick the smallest positive root value.
5282      if (ConstantInt *CB =
5283          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
5284                         R1->getValue(), R2->getValue()))) {
5285        if (!CB->getZExtValue())
5286          std::swap(R1, R2);   // R1 is the minimum root now.
5287
5288        // Make sure the root is not off by one.  The returned iteration should
5289        // not be in the range, but the previous one should be.  When solving
5290        // for "X*X < 5", for example, we should not return a root of 2.
5291        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
5292                                                             R1->getValue(),
5293                                                             SE);
5294        if (Range.contains(R1Val->getValue())) {
5295          // The next iteration must be out of the range...
5296          ConstantInt *NextVal =
5297                ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
5298
5299          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
5300          if (!Range.contains(R1Val->getValue()))
5301            return SE.getConstant(NextVal);
5302          return SE.getCouldNotCompute();  // Something strange happened
5303        }
5304
5305        // If R1 was not in the range, then it is a good return value.  Make
5306        // sure that R1-1 WAS in the range though, just in case.
5307        ConstantInt *NextVal =
5308               ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
5309        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
5310        if (Range.contains(R1Val->getValue()))
5311          return R1;
5312        return SE.getCouldNotCompute();  // Something strange happened
5313      }
5314    }
5315  }
5316
5317  return SE.getCouldNotCompute();
5318}
5319
5320
5321
5322//===----------------------------------------------------------------------===//
5323//                   SCEVCallbackVH Class Implementation
5324//===----------------------------------------------------------------------===//
5325
5326void ScalarEvolution::SCEVCallbackVH::deleted() {
5327  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
5328  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
5329    SE->ConstantEvolutionLoopExitValue.erase(PN);
5330  SE->Scalars.erase(getValPtr());
5331  // this now dangles!
5332}
5333
5334void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
5335  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
5336
5337  // Forget all the expressions associated with users of the old value,
5338  // so that future queries will recompute the expressions using the new
5339  // value.
5340  SmallVector<User *, 16> Worklist;
5341  SmallPtrSet<User *, 8> Visited;
5342  Value *Old = getValPtr();
5343  bool DeleteOld = false;
5344  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
5345       UI != UE; ++UI)
5346    Worklist.push_back(*UI);
5347  while (!Worklist.empty()) {
5348    User *U = Worklist.pop_back_val();
5349    // Deleting the Old value will cause this to dangle. Postpone
5350    // that until everything else is done.
5351    if (U == Old) {
5352      DeleteOld = true;
5353      continue;
5354    }
5355    if (!Visited.insert(U))
5356      continue;
5357    if (PHINode *PN = dyn_cast<PHINode>(U))
5358      SE->ConstantEvolutionLoopExitValue.erase(PN);
5359    SE->Scalars.erase(U);
5360    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
5361         UI != UE; ++UI)
5362      Worklist.push_back(*UI);
5363  }
5364  // Delete the Old value if it (indirectly) references itself.
5365  if (DeleteOld) {
5366    if (PHINode *PN = dyn_cast<PHINode>(Old))
5367      SE->ConstantEvolutionLoopExitValue.erase(PN);
5368    SE->Scalars.erase(Old);
5369    // this now dangles!
5370  }
5371  // this may dangle!
5372}
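// Illustrative example with hypothetical IR: if %old is used by
// %a = add i32 %old, 1, and %a in turn feeds a phi, then replacing all
// uses of %old must drop the cached SCEVs for %a, the phi, and every
// transitive user, since each of those expressions may simplify
// differently in terms of the new value.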
5373
5374ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
5375  : CallbackVH(V), SE(se) {}
5376
5377//===----------------------------------------------------------------------===//
5378//                   ScalarEvolution Class Implementation
5379//===----------------------------------------------------------------------===//
5380
5381ScalarEvolution::ScalarEvolution()
5382  : FunctionPass(&ID) {
5383}
5384
5385bool ScalarEvolution::runOnFunction(Function &F) {
5386  this->F = &F;
5387  LI = &getAnalysis<LoopInfo>();
5388  TD = getAnalysisIfAvailable<TargetData>();
5389  DT = &getAnalysis<DominatorTree>();
5390  return false;
5391}
5392
5393void ScalarEvolution::releaseMemory() {
5394  Scalars.clear();
5395  BackedgeTakenCounts.clear();
5396  ConstantEvolutionLoopExitValue.clear();
5397  ValuesAtScopes.clear();
5398  UniqueSCEVs.clear();
5399  SCEVAllocator.Reset();
5400}
5401
5402void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
5403  AU.setPreservesAll();
5404  AU.addRequiredTransitive<LoopInfo>();
5405  AU.addRequiredTransitive<DominatorTree>();
5406}
5407
5408bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
5409  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
5410}
5411
5412static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
5413                          const Loop *L) {
5414  // Print all inner loops first.
5415  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
5416    PrintLoopInfo(OS, SE, *I);
5417
5418  OS << "Loop ";
5419  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
5420  OS << ": ";
5421
5422  SmallVector<BasicBlock *, 8> ExitBlocks;
5423  L->getExitBlocks(ExitBlocks);
5424  if (ExitBlocks.size() != 1)
5425    OS << "<multiple exits> ";
5426
5427  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
5428    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
5429  } else {
5430    OS << "Unpredictable backedge-taken count. ";
5431  }
5432
5433  OS << "\n"
5434        "Loop ";
5435  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
5436  OS << ": ";
5437
5438  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
5439    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
5440  } else {
5441    OS << "Unpredictable max backedge-taken count. ";
5442  }
5443
5444  OS << "\n";
5445}
5446
5447void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
5448  // ScalarEvolution's implementation of the print method prints out
5449  // the SCEV values of all interesting instructions. Doing this can
5450  // cause new SCEV objects to be created, which technically conflicts
5451  // with the const qualifier. However, the new objects aren't
5452  // observable from outside the class, so casting away the const
5453  // isn't dangerous.
5454  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
5455
5456  OS << "Classifying expressions for: ";
5457  WriteAsOperand(OS, F, /*PrintType=*/false);
5458  OS << "\n";
5459  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
5460    if (isSCEVable(I->getType())) {
5461      OS << *I << '\n';
5462      OS << "  -->  ";
5463      const SCEV *SV = SE.getSCEV(&*I);
5464      SV->print(OS);
5465
5466      const Loop *L = LI->getLoopFor((*I).getParent());
5467
5468      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
5469      if (AtUse != SV) {
5470        OS << "  -->  ";
5471        AtUse->print(OS);
5472      }
5473
5474      if (L) {
5475        OS << "\t\t" "Exits: ";
5476        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
5477        if (!ExitValue->isLoopInvariant(L)) {
5478          OS << "<<Unknown>>";
5479        } else {
5480          OS << *ExitValue;
5481        }
5482      }
5483
5484      OS << "\n";
5485    }
5486
5487  OS << "Determining loop execution counts for: ";
5488  WriteAsOperand(OS, F, /*PrintType=*/false);
5489  OS << "\n";
5490  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
5491    PrintLoopInfo(OS, &SE, *I);
5492}
5493
5494